diff --git a/go.mod b/go.mod
index 0df9c74b..44d5c6e9 100644
--- a/go.mod
+++ b/go.mod
@@ -3,8 +3,8 @@ module github.com/containers/prometheus-podman-exporter
 go 1.20
 
 require (
-	github.com/containers/common v0.57.4
-	github.com/containers/image/v5 v5.29.2
+	github.com/containers/common v0.58.0
+	github.com/containers/image/v5 v5.30.0
 	github.com/containers/podman/v4 v4.9.3
 	github.com/go-kit/log v0.2.1
 	github.com/onsi/ginkgo/v2 v2.15.0
@@ -22,7 +22,7 @@ require (
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/Microsoft/hcsshim v0.12.0-rc.1 // indirect
+	github.com/Microsoft/hcsshim v0.12.0-rc.3 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
@@ -34,46 +34,49 @@ require (
 	github.com/checkpoint-restore/go-criu/v7 v7.0.0 // indirect
 	github.com/chzyer/readline v1.5.1 // indirect
 	github.com/containerd/cgroups/v3 v3.0.2 // indirect
-	github.com/containerd/containerd v1.7.9 // indirect
+	github.com/containerd/containerd v1.7.13 // indirect
+	github.com/containerd/errdefs v0.1.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
 	github.com/containerd/typeurl/v2 v2.1.1 // indirect
 	github.com/containernetworking/cni v1.1.2 // indirect
-	github.com/containernetworking/plugins v1.3.0 // indirect
+	github.com/containernetworking/plugins v1.4.0 // indirect
 	github.com/containers/buildah v1.33.5 // indirect
 	github.com/containers/conmon v2.0.20+incompatible // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b // indirect
 	github.com/containers/ocicrypt v1.1.9 // indirect
 	github.com/containers/psgo v1.8.0 // indirect
-	github.com/containers/storage v1.51.0 // indirect
+	github.com/containers/storage v1.53.0 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 // indirect
-	github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
 	github.com/distribution/reference v0.5.0 // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker v24.0.7+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.8.0 // indirect
-	github.com/docker/go-connections v0.4.1-0.20231031175723-0b8c1f4e07a0 // indirect
+	github.com/docker/docker v25.0.3+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.8.1 // indirect
+	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/fsouza/go-dockerclient v1.10.0 // indirect
-	github.com/go-jose/go-jose/v3 v3.0.1 // indirect
+	github.com/go-jose/go-jose/v3 v3.0.2 // indirect
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
 	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
-	github.com/go-openapi/errors v0.20.4 // indirect
+	github.com/go-openapi/errors v0.21.1 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/loads v0.21.2 // indirect
 	github.com/go-openapi/runtime v0.26.0 // indirect
 	github.com/go-openapi/spec v0.20.9 // indirect
-	github.com/go-openapi/strfmt v0.21.7 // indirect
-	github.com/go-openapi/swag v0.22.4 // indirect
+	github.com/go-openapi/strfmt v0.22.2 // indirect
+	github.com/go-openapi/swag v0.22.10 // indirect
 	github.com/go-openapi/validate v0.22.1 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
@@ -81,12 +84,12 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
-	github.com/google/go-containerregistry v0.16.1 // indirect
+	github.com/google/go-containerregistry v0.19.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
-	github.com/google/uuid v1.4.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/gorilla/schema v1.2.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -96,15 +99,15 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.3 // indirect
+	github.com/klauspost/compress v1.17.7 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/fs v0.1.0 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
-	github.com/mattn/go-sqlite3 v1.14.18 // indirect
+	github.com/mattn/go-sqlite3 v1.14.22 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -112,6 +115,7 @@ require (
 	github.com/moby/patternmatcher v0.6.0 // indirect
 	github.com/moby/sys/mountinfo v0.7.1 // indirect
 	github.com/moby/sys/sequential v0.5.0 // indirect
+	github.com/moby/sys/user v0.1.0 // indirect
 	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -120,9 +124,9 @@ require (
 	github.com/nxadm/tail v1.4.11 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
-	github.com/opencontainers/runc v1.1.10 // indirect
-	github.com/opencontainers/runtime-spec v1.1.1-0.20230922153023-c0e90434df2a // indirect
+	github.com/opencontainers/image-spec v1.1.0 // indirect
+	github.com/opencontainers/runc v1.1.12 // indirect
+	github.com/opencontainers/runtime-spec v1.2.0 // indirect
 	github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722 // indirect
@@ -134,38 +138,42 @@ require (
 	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 	github.com/sigstore/fulcio v1.4.3 // indirect
 	github.com/sigstore/rekor v1.2.2 // indirect
-	github.com/sigstore/sigstore v1.7.5 // indirect
+	github.com/sigstore/sigstore v1.8.2 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.15.0 // indirect
+	github.com/sylabs/sif/v2 v2.15.1 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.11 // indirect
 	github.com/vbatts/tar-split v0.11.5 // indirect
-	github.com/vbauerster/mpb/v8 v8.6.2 // indirect
+	github.com/vbauerster/mpb/v8 v8.7.2 // indirect
 	github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
 	github.com/vishvananda/netns v0.0.4 // indirect
-	go.etcd.io/bbolt v1.3.8 // indirect
-	go.mongodb.org/mongo-driver v1.11.3 // indirect
+	go.etcd.io/bbolt v1.3.9 // indirect
+	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	golang.org/x/crypto v0.18.0 // indirect
-	golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
-	golang.org/x/mod v0.14.0 // indirect
-	golang.org/x/net v0.20.0 // indirect
-	golang.org/x/oauth2 v0.16.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
+	go.opentelemetry.io/otel v1.19.0 // indirect
+	go.opentelemetry.io/otel/metric v1.19.0 // indirect
+	go.opentelemetry.io/otel/trace v1.19.0 // indirect
+	golang.org/x/crypto v0.21.0 // indirect
+	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
+	golang.org/x/mod v0.15.0 // indirect
+	golang.org/x/net v0.22.0 // indirect
+	golang.org/x/oauth2 v0.18.0 // indirect
 	golang.org/x/sync v0.6.0 // indirect
-	golang.org/x/sys v0.16.0 // indirect
-	golang.org/x/term v0.16.0 // indirect
+	golang.org/x/sys v0.18.0 // indirect
+	golang.org/x/term v0.18.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/tools v0.16.1 // indirect
+	golang.org/x/tools v0.18.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
-	google.golang.org/grpc v1.58.3 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
+	google.golang.org/grpc v1.59.0 // indirect
 	google.golang.org/protobuf v1.32.0 // indirect
 	gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/go.sum b/go.sum
index 68d75347..12801216 100644
--- a/go.sum
+++ b/go.sum
@@ -67,8 +67,8 @@ github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/Microsoft/hcsshim v0.12.0-rc.1 h1:Hy+xzYujv7urO5wrgcG58SPMOXNLrj4WCJbySs2XX/A=
-github.com/Microsoft/hcsshim v0.12.0-rc.1/go.mod h1:Y1a1S0QlYp1mBpyvGiuEdOfZqnao+0uX5AWHXQ5NhZU=
+github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak=
+github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -115,6 +115,7 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -180,8 +181,8 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq
 github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
-github.com/containerd/containerd v1.7.9 h1:KOhK01szQbM80YfW1H6RZKh85PHGqY/9OcEZ35Je8sc=
-github.com/containerd/containerd v1.7.9/go.mod h1:0/W44LWEYfSHoxBtsHIiNU/duEkgpMokemafHVCpq9Y=
+github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is=
+github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -189,6 +190,8 @@ github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cE
 github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
 github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
 github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
+github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
 github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
@@ -239,16 +242,16 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3
 github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
 github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
-github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
+github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic=
+github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0=
 github.com/containers/buildah v1.33.5 h1:BGzGAbK6B5VLOQlqX0V8DrteMbKGh6Q8ntHTahGqGMY=
 github.com/containers/buildah v1.33.5/go.mod h1:w3paMzMa5/3uaNSi2xq4hsQDTH9aFkoBmrHyPQ8Ixjo=
-github.com/containers/common v0.57.4 h1:kmfBad92kUjP5X44BPpOwMe+eZQqaKETfS+ASeL0g+g=
-github.com/containers/common v0.57.4/go.mod h1:o3L3CyOI9yr+JC8l4dZgvqTxcjs3qdKmkek00uchgvw=
+github.com/containers/common v0.58.0 h1:iQuwMxDD4ubZ9s1tmgdsiaHxMU4TdVBpV6kctJc6Bk8=
+github.com/containers/common v0.58.0/go.mod h1:l3vMqanJGj7tZ3W/i76gEJ128VXgFUO1tLaohJXPvdk=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.29.2 h1:b8U0XYWhaQbKucK73IbmSm8WQyKAhKDbAHQc45XlsOw=
-github.com/containers/image/v5 v5.29.2/go.mod h1:kQ7qcDsps424ZAz24thD+x7+dJw1vgur3A9tTDsj97E=
+github.com/containers/image/v5 v5.30.0 h1:CmHeSwI6W2kTRWnUsxATDFY5TEX4b58gPkaQcEyrLIA=
+github.com/containers/image/v5 v5.30.0/go.mod h1:gSD8MVOyqBspc0ynLsuiMR9qmt8UQ4jpVImjmK0uXfk=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b h1:8XvNAm+g7ivwPUkyiHvBs7z356JWpK9a0FDaek86+sY=
@@ -263,8 +266,8 @@ github.com/containers/podman/v4 v4.9.3/go.mod h1:J2qLop+mWjAOxh0QQyYPdnPA3jI6ay2
 github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
 github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
 github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
-github.com/containers/storage v1.51.0 h1:AowbcpiWXzAjHosKz7MKvPEqpyX+ryZA/ZurytRrFNA=
-github.com/containers/storage v1.51.0/go.mod h1:ybl8a3j1PPtpyaEi/5A6TOFs+5TrEyObeKJzVtkUlfc=
+github.com/containers/storage v1.53.0 h1:VSES3C/u1pxjTJIXvLrSmyP7OBtDky04oGu07UvdTEA=
+github.com/containers/storage v1.53.0/go.mod h1:pujcoOSc+upx15Jirdkebhtd8uJiLwbSd/mYT6zDJK8=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -292,8 +295,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
-github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
+github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
@@ -314,21 +317,21 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
+github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
-github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
+github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
-github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
-github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
+github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
+github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-connections v0.4.1-0.20231031175723-0b8c1f4e07a0 h1:dPD5pdqsujF9jz2NQMQCDzrBSAF3M6kIxmfU98IOp9c=
-github.com/docker/go-connections v0.4.1-0.20231031175723-0b8c1f4e07a0/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
@@ -354,10 +357,9 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
-github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
-github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
@@ -376,8 +378,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
-github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-jose/go-jose/v3 v3.0.2 h1:2Edjn8Nrb44UvTdp84KU0bBPs1cO7noRCybtS3eJEUQ=
+github.com/go-jose/go-jose/v3 v3.0.2/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
@@ -388,16 +390,19 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
 github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
 github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
 github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
 github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
-github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
+github.com/go-openapi/errors v0.21.1 h1:rVisxQPdETctjlYntm0Ek4dKf68nAQocCloCT50vWuI=
+github.com/go-openapi/errors v0.21.1/go.mod h1:LyiY9bgc7AVVh6wtVvMYEyoj3KJYNoRw92mmvnMWgj8=
 github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -425,16 +430,16 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6
 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
 github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
 github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
-github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
-github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
+github.com/go-openapi/strfmt v0.22.2 h1:DPYOrm6gexCfZZfXUaXFS4+Jw6HAaIIG0SZ5630f8yw=
+github.com/go-openapi/strfmt v0.22.2/go.mod h1:HB/b7TCm91rno75Dembc1dFW/0FPLk5CEXsoF9ReNc4=
 github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.22.10 h1:4y86NVn7Z2yYd6pfS4Z+Nyh3aAUL3Nul+LMbhFKy0gA=
+github.com/go-openapi/swag v0.22.10/go.mod h1:Cnn8BYtRlx6BNE3DPN86f/xkapGIcLWzh3CLEb4C1jI=
 github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
 github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -535,8 +540,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
-github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
-github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
+github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic=
+github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -560,8 +565,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
-github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
@@ -582,7 +587,9 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
 github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
@@ -594,8 +601,6 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
-github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -612,7 +617,7 @@ github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
 github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
 github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -640,8 +645,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
-github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
+github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
 github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -661,8 +666,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
-github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
+github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e h1:RLTpX495BXToqxpM90Ws4hXEo4Wfh81jr9DX1n/4WOo=
+github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU=
 github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -686,8 +691,8 @@ github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq
 github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
 github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
-github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
@@ -718,6 +723,8 @@ github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGp
 github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
 github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
 github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
+github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
 github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
 github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
@@ -779,8 +786,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
-github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
 github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23 h1:CjJqzUWt07IJR6gO+Ron5qEcXomyLZLgURiSCXN6vXM=
 github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23/go.mod h1:UkHdGiHfjdRR/suiePnXB844WcjZ0RcfGm2mQS/V5jM=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -790,8 +797,8 @@ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/
 github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.1.1-0.20230922153023-c0e90434df2a h1:ekgJlqTI6efJ57J7tqvIOYtdPnJRe8MxUZHbZAC021Y=
-github.com/opencontainers/runtime-spec v1.1.1-0.20230922153023-c0e90434df2a/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
 github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:d2hUh5O6MRBvStV55MQ8we08t42zSTqBbscoQccWmMc=
 github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs=
@@ -880,16 +887,16 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24
 github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
 github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
 github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
-github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
-github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
+github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
+github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
 github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
 github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
 github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
-github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48=
-github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s=
+github.com/sigstore/sigstore v1.8.2 h1:0Ttjcn3V0fVQXlYq7+oHaaHkGFIt3ywm7SF4JTU/l8c=
+github.com/sigstore/sigstore v1.8.2/go.mod h1:CHVcSyknCcjI4K2ZhS1SI28r0tcQyBlwtALG536x1DY=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -941,9 +948,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw=
-github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/sylabs/sif/v2 v2.15.1 h1:75BcunPOY11fVhe02/WHuNLTfDd3OHH0ex0MuuNMYX0=
+github.com/sylabs/sif/v2 v2.15.1/go.mod h1:YiwCUdZOhiohnPbyxuxvCZa+03HwAaiC+vfAKZPR8nQ=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -953,7 +960,6 @@ github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ
 github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
 github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -971,8 +977,8 @@ github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN
 github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
 github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
 github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
-github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA=
-github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo=
+github.com/vbauerster/mpb/v8 v8.7.2 h1:SMJtxhNho1MV3OuFgS1DAzhANN1Ejc5Ct+0iSaIkB14=
+github.com/vbauerster/mpb/v8 v8.7.2/go.mod h1:ZFnrjzspgDHoxYLGvxIruiNk73GNTPG4YHgVNpR10VY=
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -983,8 +989,6 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17
 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
 github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
 github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
-github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
-github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
 github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
@@ -1012,14 +1016,14 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
-go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
+go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
+go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
 go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
 go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
 go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
 go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
-go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
+go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
+go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
@@ -1029,7 +1033,19 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
+go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
+go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
+go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
+go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
+go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
+go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@@ -1043,7 +1059,6 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -1054,8 +1069,9 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
 golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
-golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
-golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1067,8 +1083,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
 golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1093,8 +1109,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1143,15 +1159,15 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1256,16 +1272,18 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod 
h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1341,8 +1359,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1393,8 +1411,10 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= +google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 h1:vlzZttNJGVqTsRFU9AmdnrcO1Znh8Ew9kCD//yjigk0= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1413,8 +1433,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= -google.golang.org/grpc 
v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1434,7 +1454,6 @@ google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7 google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1477,7 +1496,7 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml index abe77f57..7d38a2fb 100644 --- a/vendor/github.com/Microsoft/hcsshim/.golangci.yml +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -20,6 +20,7 @@ linters: # - typecheck # - unused + - errorlint # error wrapping (e.g., not using `errors.Is`, using `%s` instead of `%w` in `fmt.Errorf`) - gofmt # whether code was gofmt-ed - govet # enabled by default, but just to be sure - nolintlint # ill-formed or insufficient nolint directives @@ -53,6 +54,12 @@ issues: text: "^ST1003: should not use underscores in package names$" source: "^package cri_containerd$" + # don't bother with proper error wrapping in test code + - path: cri-containerd + linters: + - errorlint + text: "non-wrapping format verb for fmt.Errorf" +
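Editorial aside: the newly enabled `errorlint` linter explains most of the mechanical churn in the Go hunks further down in this diff (the `errors.Is` conversions and the `//nolint:errorlint` annotations). A minimal, hypothetical example of the style it enforces:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// readConfig wraps the underlying failure with %w (not %s/%v),
// which is what errorlint checks for in fmt.Errorf calls.
func readConfig(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		return fmt.Errorf("read config %q: %w", path, err)
	}
	return nil
}

func main() {
	err := readConfig("/nonexistent/config.toml")
	// errorlint also flags direct comparisons such as err == fs.ErrNotExist;
	// errors.Is walks the chain built up by %w instead.
	if errors.Is(err, fs.ErrNotExist) {
		fmt.Println("config missing:", err)
	}
}
```

Call sites in the vendored code that deliberately compare sentinel errors directly (for example, ones explicitly returned within the same package) are annotated with `//nolint:errorlint` rather than rewritten.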
# This repo has a LOT of generated schema files, operating system bindings, and other # things that ST1003 from stylecheck won't like (screaming case Windows api constants for example). # There's also some structs that we *could* change the initialisms to be Go friendly diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile index d8eb30b8..de643589 100644 --- a/vendor/github.com/Microsoft/hcsshim/Makefile +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -29,12 +29,23 @@ ifeq "$(DEV_BUILD)" "1" DELTA_TARGET=out/delta-dev.tar.gz endif +ifeq "$(SNP_BUILD)" "1" +DELTA_TARGET=out/delta-snp.tar.gz +endif + # The link aliases for gcstools GCS_TOOLS=\ generichook \ install-drivers -.PHONY: all always rootfs test +# Common path prefix. +PATH_PREFIX:= +# These have PATH_PREFIX prepended to obtain the full path in recipes, e.g. $(PATH_PREFIX)/$(VMGS_TOOL) +VMGS_TOOL:= +IGVM_TOOL:= +KERNEL_PATH:= + +.PHONY: all always rootfs test snp simple .DEFAULT_GOAL := all @@ -49,9 +60,58 @@ test: rootfs: out/rootfs.vhd -out/rootfs.vhd: out/rootfs.tar.gz bin/cmd/tar2ext4 +snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs + +simple: out/simple.vmgs snp + +%.vmgs: %.bin + rm -f $@ + # du -BM returns the size of the bin file in M, e.g. 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes + $(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc` + $(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8 + +# Simplest debug UVM used to test changes to the Linux kernel. No dm-verity protection. Boots an initramdisk rather than directly booting a vhd disk. +out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh + rm -f $@ + python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0 + +ROOTFS_DEVICE:=/dev/sda +VERITY_DEVICE:=/dev/sdb +# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in the dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.) +out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh + rm -f $@ + python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0 +
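Editorial aside (not part of the vendored Makefile): the `v2056` and `kernelinitrd` recipes both splice a dm-verity table into the kernel command line via `dm-mod.create=`. A minimal Go sketch of that table's layout, with invented sample values, may make the long `$(shell cat ...)` interpolations easier to read:

```go
package main

import "fmt"

// verityInfo mirrors the values the recipes extract from `veritysetup format`
// output; the field names and sample numbers below are illustrative only.
type verityInfo struct {
	dataSectors   uint64
	dataBlockSize uint64
	hashBlockSize uint64
	dataBlocks    uint64
	rootDigest    string
	salt          string
}

// dmModCreate renders the dm-mod.create= table embedded in the kernel command
// line: one read-only dm-verity target spanning dataSectors 512-byte sectors,
// with data on rootfsDev and the hash tree on verityDev.
func dmModCreate(rootfsDev, verityDev string, v verityInfo) string {
	return fmt.Sprintf("dmverity,,,ro,0 %d verity 1 %s %s %d %d %d 0 sha256 %s %s",
		v.dataSectors, rootfsDev, verityDev,
		v.dataBlockSize, v.hashBlockSize, v.dataBlocks,
		v.rootDigest, v.salt)
}

func main() {
	v := verityInfo{
		dataSectors:   204800, // dataBlocks * dataBlockSize / 512, as the .hash.datasectors rule computes
		dataBlockSize: 4096,
		hashBlockSize: 4096,
		dataBlocks:    25600,
		rootDigest:    "<root digest from veritysetup>",
		salt:          "<salt from out/rootfs.hash.salt>",
	}
	fmt.Println(dmModCreate("/dev/sda", "/dev/sdb", v))
}
```

The `v2056` variant appends `1 ignore_corruption` to the table, so dm-verity logs rather than fails on a corrupted disk, matching the "Use dmesg" note in the recipe comment.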
+# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line. +out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh + rm -f $@ + python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0 + +# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash. +%.vhd: % bin/cmd/tar2ext4 + ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@ + +# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4. +%.vhd: %.ext4 bin/cmd/tar2ext4 + ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@ + +%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %.hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt + veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info + # Retrieve info required by dm-verity at boot time + # Extract the root digest, salt, block sizes, and block counts from the veritysetup output + cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest + cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt + cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize + cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize + cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks + echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors + +out/rootfs.hash.salt: + hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@ + +out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4 gzip -f -d ./out/rootfs.tar.gz - bin/cmd/tar2ext4 -vhd -i ./out/rootfs.tar -o $@ + ./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@ out/rootfs.tar.gz: out/initrd.img rm -rf rootfs-conv @@ -74,6 +134,20 @@ out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report tar -zcf $@ -C rootfs-dev . rm -rf rootfs-dev +out/delta-snp.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report boot/startup_v2056.sh boot/startup_simple.sh boot/startup.sh + rm -rf rootfs-snp + mkdir rootfs-snp + tar -xzf out/delta.tar.gz -C rootfs-snp + cp boot/startup_v2056.sh rootfs-snp/startup_v2056.sh + cp boot/startup_simple.sh rootfs-snp/startup_simple.sh + cp boot/startup.sh rootfs-snp/startup.sh + cp bin/internal/tools/snp-report rootfs-snp/bin/ + chmod a+x rootfs-snp/startup_v2056.sh + chmod a+x rootfs-snp/startup_simple.sh + chmod a+x rootfs-snp/startup.sh + tar -zcf $@ -C rootfs-snp . + rm -rf rootfs-snp + out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile @mkdir -p out rm -rf rootfs @@ -94,7 +168,10 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho tar -zcf $@ -C rootfs .
rm -rf rootfs -bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report: +out/containerd-shim-runhcs-v1.exe: + GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1 + +bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd: @mkdir -p $(dir $@) GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%) @@ -108,4 +185,4 @@ bin/init: init/init.o vsockexec/vsock.o %.o: %.c @mkdir -p $(dir $@) - $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< + $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md index 5a136153..32043804 100644 --- a/vendor/github.com/Microsoft/hcsshim/README.md +++ b/vendor/github.com/Microsoft/hcsshim/README.md @@ -9,15 +9,18 @@ It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd ## Building While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). + ### Linux Hyper-V Container Guest Agent To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs. + ```powershell C:\> $env:GOOS="linux" C:\> go build .\cmd\gcs\ ``` or on a Linux machine + ```sh > go build ./cmd/gcs ``` @@ -33,13 +36,15 @@ make all ``` If the build is successful, in the `./out` folder you should see: + ```sh > ls ./out/ delta.tar.gz initrd.img rootfs.tar.gz ``` ### Containerd Shim - For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md. + +For info, see the [Runtime V2 API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers. @@ -48,7 +53,9 @@ C:\> $env:GOOS="windows" C:\> go build .\cmd\containerd-shim-runhcs-v1 ``` -Then place the binary in the same directory that Containerd is located at in your environment. A default Containerd configuration file can be generated by running: +Then place the binary in the same directory where Containerd is located in your environment. +A default Containerd configuration file can be generated by running: + ```powershell .\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii ``` @@ -56,6 +63,7 @@ Then place the binary in the same directory that Containerd is located at in you This config file will already have the shim set as the default runtime for cri interactions. To trial using the shim out with ctr.exe: + ```powershell C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!" ``` @@ -64,16 +72,69 @@ C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/window This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.microsoft.com. +the rights to use your contribution. For details, visit [Microsoft CLA](https://cla.microsoft.com).
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. -We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to -certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for -more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure -that all commits in a given PR are signed-off. +We require that contributors sign their commits using [`git commit --signoff`][git-commit-s] +to certify they either authored the work themselves or otherwise have permission to use it in this project. +A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s]. + +Please see [the developer certificate](https://developercertificate.org) for more info, +as well as to make sure that you can attest to the rules listed. +Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure that all commits in a given PR are signed-off. + +### Linting + +Code must pass a linting stage, which uses [`golangci-lint`][lint]. +Since `./test` is a separate Go module, the linter is run from both the root and the +`test` directories. Additionally, the linter is run with `GOOS` set to both `windows` and +`linux`. + +The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run +automatically with VSCode by adding the following to your workspace or folder settings: + +```json + "go.lintTool": "golangci-lint", + "go.lintOnSave": "package", +``` + +Additional editor [integration options are also available][lint-ide]. + +Alternatively, `golangci-lint` can be [installed][lint-install] and run locally: + +```shell +# use . or specify a path to only lint a package +# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0" +> golangci-lint run +``` + +To run across the entire repo for both `GOOS=windows` and `linux`: + +```powershell +> foreach ( $goos in ('windows', 'linux') ) { + foreach ( $repo in ('.', 'test') ) { + pwsh -Command "cd $repo && go env -w GOOS=$goos && golangci-lint.exe run --verbose" + } +} +``` + +### Go Generate + +The pipeline checks that auto-generated code, via `go generate`, is up to date. +Similar to the [linting stage](#linting), `go generate` is run in both the root and test Go modules. + +This can be done via: + +```shell +> go generate ./... +> cd test && go generate ./... +``` ## Code of Conduct @@ -83,7 +144,7 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio ## Dependencies -This project requires Golang 1.17 or newer to build. +This project requires Golang 1.18 or newer to build. For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements). @@ -100,3 +161,10 @@ For additional details, see [Report a Computer Security Vulnerability](https://t --------------- Copyright (c) 2018 Microsoft Corp.
All rights reserved. + +[lint]: https://golangci-lint.run/ +[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration +[lint-install]: https://golangci-lint.run/usage/install/#local-installation + +[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s +[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go index 54c4b3bc..301a1088 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go @@ -38,3 +38,31 @@ func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData L } return nil } + +// AttachOverlayFilter sets up a filter of the given type on a writable container layer. Currently, the only +// supported filter types are WCIFS & UnionFS (defined in internal/hcs/schema2/layer.go). +// +// `volumePath` is the volume path at which the writable layer is mounted. If the +// path does not end in a `\` the platform will append it automatically. +// +// `layerData` is the parent read-only layer data. +func AttachOverlayFilter(ctx context.Context, volumePath string, layerData LayerData) (err error) { + title := "hcsshim::AttachOverlayFilter" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("volumePath", volumePath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsAttachOverlayFilter(volumePath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to attach overlay filter") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go index daf1bfff..6e00e4a1 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go @@ -4,7 +4,9 @@ package computestorage import ( "context" + "encoding/json" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/oc" "github.com/pkg/errors" "go.opencensus.io/trace" ) @@ -26,3 +28,27 @@ func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) } return nil } + +// DetachOverlayFilter detaches the filter on a writable container layer. +// +// `volumePath` is a path to the writable container volume.
+func DetachOverlayFilter(ctx context.Context, volumePath string, filterType hcsschema.FileSystemFilterType) (err error) { + title := "hcsshim::DetachOverlayFilter" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("volumePath", volumePath)) + + layerData := LayerData{} + layerData.FilterType = filterType + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsDetachOverlayFilter(volumePath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to detach overlay filter") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go index c38d3aa5..5af931f2 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go @@ -19,14 +19,17 @@ import ( //sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd? //sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? //sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? +//sys hcsAttachOverlayFilter(volumePath string, layerData string) (hr error) = computestorage.HcsAttachOverlayFilter? +//sys hcsDetachOverlayFilter(volumePath string, layerData string) (hr error) = computestorage.HcsDetachOverlayFilter? type Version = hcsschema.Version type Layer = hcsschema.Layer // LayerData is the data used to describe parent layer information. type LayerData struct { - SchemaVersion Version `json:"SchemaVersion,omitempty"` - Layers []Layer `json:"Layers,omitempty"` + SchemaVersion Version `json:"SchemaVersion,omitempty"` + Layers []Layer `json:"Layers,omitempty"` + FilterType hcsschema.FileSystemFilterType `json:"FilterType,omitempty"` } // ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. 
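Editorial aside: a sketch of how the new overlay-filter pieces fit together — `AttachOverlayFilter`, `DetachOverlayFilter`, and the new `LayerData.FilterType` field. It is written as if from inside hcsshim, since the filter-type constants live in an internal package; external callers would pass untyped string constants such as "UnionFS" instead. The helper below is hypothetical:

```go
//go:build windows

package overlaydemo

import (
	"context"

	"github.com/Microsoft/hcsshim/computestorage"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" // internal: importable only within hcsshim
)

// withUnionFS attaches a UnionFS overlay filter to an already-mounted writable
// volume, runs fn, then detaches the filter. volumePath and parents are
// caller-supplied.
func withUnionFS(ctx context.Context, volumePath string, parents []hcsschema.Layer, fn func() error) error {
	layerData := computestorage.LayerData{
		Layers:     parents,
		FilterType: hcsschema.UnionFS, // hcsschema.WCIFS is the other supported value
	}
	if err := computestorage.AttachOverlayFilter(ctx, volumePath, layerData); err != nil {
		return err
	}
	defer func() {
		// Best-effort detach; note DetachOverlayFilter takes the filter type
		// directly rather than a LayerData.
		_ = computestorage.DetachOverlayFilter(ctx, volumePath, hcsschema.UnionFS)
	}()
	return fn()
}
```

Under the hood both calls JSON-encode a `LayerData` and hand it to the new `HcsAttachOverlayFilter`/`HcsDetachOverlayFilter` exports from computestorage.dll, as the generated zsyscall bindings below show.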
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go index b996b35e..53d0beb8 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -43,8 +43,10 @@ var ( modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") + procHcsAttachOverlayFilter = modcomputestorage.NewProc("HcsAttachOverlayFilter") procHcsDestroyLayer = modcomputestorage.NewProc("HcsDestroyLayer") procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") + procHcsDetachOverlayFilter = modcomputestorage.NewProc("HcsDetachOverlayFilter") procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") @@ -83,6 +85,35 @@ func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr erro return } +func hcsAttachOverlayFilter(volumePath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsAttachOverlayFilter(_p0, _p1) +} + +func _hcsAttachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) { + hr = procHcsAttachOverlayFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsAttachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func hcsDestroyLayer(layerPath string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(layerPath) @@ -131,6 +162,35 @@ func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { return } +func hcsDetachOverlayFilter(volumePath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsDetachOverlayFilter(_p0, _p1) +} + +func _hcsDetachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) { + hr = procHcsDetachOverlayFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDetachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(layerPath) diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go index c8f09f88..0ad7f495 100644 --- a/vendor/github.com/Microsoft/hcsshim/container.go +++ b/vendor/github.com/Microsoft/hcsshim/container.go @@ -75,7 +75,7 @@ func init() { func CreateContainer(id string, c *ContainerConfig) (Container, error) { fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON) if err != nil { - return nil, fmt.Errorf("failed to merge 
additional JSON '%s': %s", createContainerAdditionalJSON, err) + return nil, fmt.Errorf("failed to merge additional JSON '%s': %w", createContainerAdditionalJSON, err) } system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig) diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go index 594bbfb7..b441b0cd 100644 --- a/vendor/github.com/Microsoft/hcsshim/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/errors.go @@ -115,6 +115,7 @@ func (e *ContainerError) Error() string { s += " encountered an error during " + e.Operation } + //nolint:errorlint // legacy code switch e.Err.(type) { case nil: break @@ -145,6 +146,7 @@ func (e *ProcessError) Error() string { s += " encountered an error during " + e.Operation } + //nolint:errorlint // legacy code switch e.Err.(type) { case nil: break @@ -166,10 +168,10 @@ func (e *ProcessError) Error() string { // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist // will currently return true when the error is ErrElementNotFound. func IsNotExist(err error) bool { - if _, ok := err.(EndpointNotFoundError); ok { + if _, ok := err.(EndpointNotFoundError); ok { //nolint:errorlint // legacy code return true } - if _, ok := err.(NetworkNotFoundError); ok { + if _, ok := err.(NetworkNotFoundError); ok { //nolint:errorlint // legacy code return true } return hcs.IsNotExist(getInnerError(err)) @@ -224,6 +226,7 @@ func IsAccessIsDenied(err error) bool { } func getInnerError(err error) error { + //nolint:errorlint // legacy code switch pe := err.(type) { case nil: return nil @@ -236,14 +239,14 @@ func getInnerError(err error) error { } func convertSystemError(err error, c *container) error { - if serr, ok := err.(*hcs.SystemError); ok { + if serr, ok := err.(*hcs.SystemError); ok { //nolint:errorlint // legacy code return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events} } return err } func convertProcessError(err error, p *process) error { - if perr, ok := err.(*hcs.ProcessError); ok { + if perr, ok := err.(*hcs.ProcessError); ok { //nolint:errorlint // legacy code return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events} } return err diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 37afbf69..8ef611d6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -63,7 +63,7 @@ func (process *Process) SystemID() string { } func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { - switch err { + switch err { //nolint:errorlint case nil: return true, nil case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go index 176c49d4..cb8dea08 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go @@ -9,6 +9,13 @@ package hcsschema +type FileSystemFilterType string + +const ( + UnionFS FileSystemFilterType = "UnionFS" + WCIFS FileSystemFilterType = "WCIFS" +) + type Layer struct { Id string `json:"Id,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go new file mode 100644 index 00000000..e7b605fd --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go @@ -0,0 +1,13 @@ +package hcsschema + +// NOTE: manually added + +type RegistryHive string + +// List of RegistryHive +const ( + RegistryHive_SYSTEM RegistryHive = "System" + RegistryHive_SOFTWARE RegistryHive = "Software" + RegistryHive_SECURITY RegistryHive = "Security" + RegistryHive_SAM RegistryHive = "Sam" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go index 26fde99c..1883444a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go @@ -10,7 +10,7 @@ package hcsschema type RegistryKey struct { - Hive string `json:"Hive,omitempty"` + Hive RegistryHive `json:"Hive,omitempty"` Name string `json:"Name,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go index 3f203176..13f24d53 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go @@ -14,7 +14,7 @@ type RegistryValue struct { Name string `json:"Name,omitempty"` - Type_ string `json:"Type,omitempty"` + Type_ RegistryValueType `json:"Type,omitempty"` // One and only one value type must be set. StringValue string `json:"StringValue,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go new file mode 100644 index 00000000..c8b4f6c9 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go @@ -0,0 +1,17 @@ +package hcsschema + +// NOTE: manually added + +type RegistryValueType string + +// List of RegistryValueType +const ( + RegistryValueType_NONE RegistryValueType = "None" + RegistryValueType_STRING RegistryValueType = "String" + RegistryValueType_EXPANDED_STRING RegistryValueType = "ExpandedString" + RegistryValueType_MULTI_STRING RegistryValueType = "MultiString" + RegistryValueType_BINARY RegistryValueType = "Binary" + RegistryValueType_D_WORD RegistryValueType = "DWord" + RegistryValueType_Q_WORD RegistryValueType = "QWord" + RegistryValueType_CUSTOM_TYPE RegistryValueType = "CustomType" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index cf1db7da..81d60ed4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -97,7 +97,7 @@ func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface in events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) if err != nil { - if err == ErrTimeout { + if errors.Is(err, ErrTimeout) { // Terminate the compute system if it still exists. We're okay to // ignore a failure here. 
_ = computeSystem.Terminate(ctx) @@ -238,7 +238,7 @@ func (computeSystem *System) Shutdown(ctx context.Context) error { resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") events := processHcsResult(ctx, resultJSON) - switch err { + switch err { //nolint:errorlint case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: default: return makeSystemError(computeSystem, operation, err, events) @@ -259,7 +259,7 @@ func (computeSystem *System) Terminate(ctx context.Context) error { resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") events := processHcsResult(ctx, resultJSON) - switch err { + switch err { //nolint:errorlint case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: default: return makeSystemError(computeSystem, operation, err, events) @@ -279,7 +279,7 @@ func (computeSystem *System) waitBackground() { span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - switch err { + switch err { //nolint:errorlint case nil: log.G(ctx).Debug("system exited") case ErrVmcomputeUnexpectedExit: diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go index 0a8f36d8..e61dc8de 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go @@ -31,7 +31,7 @@ func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) { func hnsCall(method, path, request string, returnResponse interface{}) error { hnsresponse, err := hnsCallRawResponse(method, path, request) if err != nil { - return fmt.Errorf("failed during hnsCallRawResponse: %v", err) + return fmt.Errorf("failed during hnsCallRawResponse: %w", err) } if !hnsresponse.Success { return fmt.Errorf("hns failed with error : %s", hnsresponse.Error) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go index 749588ad..a64b6792 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go @@ -56,7 +56,7 @@ func issueNamespaceRequest(id *string, method, subpath string, request interface if strings.Contains(err.Error(), "Element not found.") { return nil, os.ErrNotExist } - return nil, fmt.Errorf("%s %s: %s", method, hnspath, err) + return nil, fmt.Errorf("%s %s: %w", method, hnspath, err) } return &ns, err } @@ -86,7 +86,7 @@ func GetNamespaceEndpoints(id string) ([]string, error) { var endpoint namespaceEndpointRequest err = json.Unmarshal(rsrc.Data, &endpoint) if err != nil { - return nil, fmt.Errorf("unmarshal endpoint: %s", err) + return nil, fmt.Errorf("unmarshal endpoint: %w", err) } endpoints = append(endpoints, endpoint.ID) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go index bcca84b0..eae3cc50 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go @@ -4,6 +4,7 @@ package jobobject import ( "context" + "errors" "fmt" "sync" "unsafe" @@ -59,7 +60,7 @@ func pollIOCP(ctx context.Context, iocpHandle windows.Handle) { }).Warn("failed to parse job object message") continue } - if err := 
msq.Enqueue(notification); err == queue.ErrQueueClosed { + if err := msq.Enqueue(notification); errors.Is(err, queue.ErrQueueClosed) { // Write will only return an error when the queue is closed. // The only time a queue would ever be closed is when we call `Close` on // the job it belongs to which also removes it from the jobMap, so something diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go index 4a224fbe..10ae4d67 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -374,7 +374,7 @@ func (job *JobObject) Pids() ([]uint32, error) { return []uint32{}, nil } - if err != winapi.ERROR_MORE_DATA { + if err != winapi.ERROR_MORE_DATA { //nolint:errorlint return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go index 03f71d9a..e3b1a1ed 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go @@ -143,6 +143,13 @@ func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { return err } info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY) + + // We really, really shouldn't be running on 32 bit, but just in case (and to satisfy CodeQL) ... + const maxUintptr = ^uintptr(0) + if affinityBitMask > uint64(maxUintptr) { + return fmt.Errorf("affinity bitmask (%d) exceeds max allowable value (%d)", affinityBitMask, maxUintptr) + } + info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) return job.setExtendedInformation(info) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go index 6d69c15b..1ceb26ba 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go @@ -104,6 +104,7 @@ func encode(v interface{}) (_ []byte, err error) { if jErr := enc.Encode(v); jErr != nil { if err != nil { // TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...) 
+ //nolint:errorlint // non-wrapping format verb for fmt.Errorf return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr) } return nil, fmt.Errorf("json encoding: %w", jErr) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go index 3e175e52..cceb3e2d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go @@ -46,6 +46,7 @@ const ( ExpectedType = "expected-type" Bool = "bool" + Int32 = "int32" Uint32 = "uint32" Uint64 = "uint64" diff --git a/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go index 1ef5814d..6d39ca3b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go @@ -126,7 +126,7 @@ func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { // this means that there are no more regions for the current class, try expanding if nextCls != memCls { if err := pa.split(memCls); err != nil { - if err == ErrInvalidMemoryClass { + if errors.Is(err, ErrInvalidMemoryClass) { return nil, ErrNotEnoughSpace } return nil, err @@ -147,7 +147,7 @@ func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { } // Release marks a memory region of class `memCls` and offset `offset` as free and tries to merge smaller regions into -// a bigger one +// a bigger one. func (pa *PoolAllocator) Release(reg MappedRegion) error { mp := pa.pools[reg.Type()] if mp == nil { @@ -164,7 +164,7 @@ func (pa *PoolAllocator) Release(reg MappedRegion) error { return ErrNotAllocated } if err := pa.merge(n.parent); err != nil { - if err != ErrEarlyMerge { + if !errors.Is(err, ErrEarlyMerge) { return err } } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go index 71df25b8..8c41a366 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go @@ -6,7 +6,7 @@ import ( "net" "os" - "github.com/containerd/containerd/errdefs" + "github.com/containerd/errdefs" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -16,7 +16,7 @@ import ( func toStatusCode(err error) codes.Code { // checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status, - // wraps an error defined in "github.com/containerd/containerd/errdefs", or is a + // wraps an error defined in "github.com/containerd/errdefs", or is a // context timeout or cancelled error if s, ok := status.FromError(errdefs.ToGRPC(err)); ok { return s.Code() diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go index 70368533..b087b987 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -243,7 +243,7 @@ func RemoveRelative(path string, root *os.File) error { if err == nil { defer f.Close() err = deleteOnClose(f) - if err == syscall.ERROR_ACCESS_DENIED { + if err == syscall.ERROR_ACCESS_DENIED { //nolint:errorlint // Maybe the file is marked readonly. Clear the bit and retry. 
_ = clearReadOnly(f) err = deleteOnClose(f) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go index 79b14ef9..67ca897c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -104,7 +104,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error }() select { case <-ctx.Done(): - if ctx.Err() == gcontext.DeadlineExceeded { + if ctx.Err() == gcontext.DeadlineExceeded { //nolint:errorlint log.G(ctx).WithField(logfields.Timeout, trueTimeout). Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. " + "If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " + @@ -150,7 +150,7 @@ func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration strin if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -205,7 +205,7 @@ func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -228,7 +228,7 @@ func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, opt if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -251,7 +251,7 @@ func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, op if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -274,7 +274,7 @@ func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -297,7 +297,7 @@ func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, optio if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -621,7 +621,7 @@ func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go index 792f13f5..807b7de1 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -64,7 +66,7 @@ func (r *baseLayerReader) walkUntilCancelled() error { return nil }) - if err == errorIterationCanceled { + if err == errorIterationCanceled { //nolint:errorlint // explicitly returned return nil } @@ -103,7 +105,7 @@ func (r *baseLayerReader) walkUntilCancelled() error { return nil }) - if err == errorIterationCanceled { + if err == errorIterationCanceled { //nolint:errorlint // explicitly returned return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go index c542f556..d25c3c52 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go index e2ec27ad..35fcbedb 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -11,7 +11,6 @@ import ( "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/osversion" "go.opencensus.io/trace" ) @@ -30,14 +29,17 @@ func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error return hcserror.New(err, title, "") } - // Manually expand the volume now in order to work around bugs in 19H1 and - // prerelease versions of Vb. Remove once this is fixed in Windows. - if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 { - err = expandSandboxVolume(ctx, path) - if err != nil { - return err - } + // Always expand the volume too. In the case of legacy layers, not expanding the volume here works because + // the PrepareLayer call internally handles the expansion. However, in other cases (like CimFS) we + // don't call PrepareLayer and so the volume would never be expanded. This also means that, in the case of + // legacy layers, we might have a small perf hit because the VHD is mounted twice for expansion (once + // here and once during the PrepareLayer call). But as long as the perf hit is minimal, we should be + // okay. + err = expandSandboxVolume(ctx, path) + if err != nil { + return err } + return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index 807d8331..fc12eeba 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -154,7 +154,7 @@ func (r *legacyLayerReader) walkUntilCancelled() error { } return nil }) - if err == errorIterationCanceled { + if err == errorIterationCanceled { //nolint:errorlint // explicitly returned return nil } if err == nil { @@ -196,7 +196,7 @@ func findBackupStreamSize(r io.Reader) (int64, error) { for { hdr, err := br.Next() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { err = nil } return 0, err @@ -428,7 +428,7 @@ func (w *legacyLayerWriter) initUtilityVM() error { // immutable.
err = cloneTree(w.parentRoots[0], w.destRoot, UtilityVMFilesPath, mutatedUtilityVMFiles) if err != nil { - return fmt.Errorf("cloning the parent utility VM image failed: %s", err) + return fmt.Errorf("cloning the parent utility VM image failed: %w", err) } w.HasUtilityVM = true } @@ -451,7 +451,7 @@ func (w *legacyLayerWriter) reset() error { for { bhdr, err := br.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { // end of backupstream data break } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go index d04bffc1..21664577 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go @@ -1,3 +1,5 @@ +//go:build windows + package winapi import ( @@ -34,7 +36,7 @@ type CimFsFileMetadata struct { //sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage? //sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage? -//sys CimCloseImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCloseImage? +//sys CimCloseImage(cimFSHandle FsHandle) = cimfs.CimCloseImage? //sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage? //sys CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateFile? diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index b5b9fccc..ffd3cd7f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -184,18 +184,12 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr return } -func CimCloseImage(cimFSHandle FsHandle) (hr error) { - hr = procCimCloseImage.Find() - if hr != nil { +func CimCloseImage(cimFSHandle FsHandle) (err error) { + err = procCimCloseImage.Find() + if err != nil { return } - r0, _, _ := syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } + syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) return } diff --git a/vendor/github.com/containerd/errdefs/LICENSE b/vendor/github.com/containerd/errdefs/LICENSE new file mode 100644 index 00000000..584149b6 --- /dev/null +++ b/vendor/github.com/containerd/errdefs/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/errdefs/README.md b/vendor/github.com/containerd/errdefs/README.md new file mode 100644 index 00000000..bd418c63 --- /dev/null +++ b/vendor/github.com/containerd/errdefs/README.md @@ -0,0 +1,13 @@ +# errdefs + +A Go package for defining and checking common containerd errors. + +## Project details + +**errdefs** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. 
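The package documentation in the errors.go diff that follows spells out the intended usage: wrap one of the exported sentinel errors with fmt.Errorf and %w to add context, then classify the result with the IsXXX helpers. A minimal sketch of that pattern from a consumer's point of view (findImage and the image name are hypothetical, not part of this change):

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/containerd/errdefs"
    )

    // findImage is a hypothetical lookup that fails with a classified error.
    func findImage(name string) error {
    	// Wrapping with %w keeps the sentinel in the error chain.
    	return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound)
    }

    func main() {
    	err := findImage("quay.io/example/missing")
    	// IsNotFound is errors.Is(err, ErrNotFound) under the hood, so it
    	// still matches after wrapping.
    	fmt.Println(errdefs.IsNotFound(err))             // true
    	fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true
    }

Because the helpers are built on errors.Is, classification survives any number of wrapping layers, which is what lets the vendored callers below add context freely.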
diff --git a/vendor/github.com/containerd/errdefs/errors.go b/vendor/github.com/containerd/errdefs/errors.go
new file mode 100644
index 00000000..87622559
--- /dev/null
+++ b/vendor/github.com/containerd/errdefs/errors.go
@@ -0,0 +1,92 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package errdefs defines the common errors used throughout containerd
+// packages.
+//
+// Use with fmt.Errorf to add context to an error.
+//
+// To detect an error class, use the IsXXX functions to tell whether an error
+// is of a certain type.
+//
+// The functions ToGRPC and FromGRPC can be used to map server-side and
+// client-side errors to the correct types.
+package errdefs
+
+import (
+	"context"
+	"errors"
+)
+
+// Definitions of common error types used throughout containerd. All containerd
+// errors returned by most packages will map into one of these error classes.
+// Packages should return errors of these types when they want to instruct a
+// client to take a particular action.
+//
+// For the most part, we just try to provide local grpc errors. Most conditions
+// map very well to those defined by grpc.
+var (
+	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
+	ErrInvalidArgument    = errors.New("invalid argument")
+	ErrNotFound           = errors.New("not found")
+	ErrAlreadyExists      = errors.New("already exists")
+	ErrFailedPrecondition = errors.New("failed precondition")
+	ErrUnavailable        = errors.New("unavailable")
+	ErrNotImplemented     = errors.New("not implemented") // represents not supported and unimplemented
+)
+
+// IsInvalidArgument returns true if the error is due to an invalid argument
+func IsInvalidArgument(err error) bool {
+	return errors.Is(err, ErrInvalidArgument)
+}
+
+// IsNotFound returns true if the error is due to a missing object
+func IsNotFound(err error) bool {
+	return errors.Is(err, ErrNotFound)
+}
+
+// IsAlreadyExists returns true if the error is due to an already existing
+// metadata item
+func IsAlreadyExists(err error) bool {
+	return errors.Is(err, ErrAlreadyExists)
+}
+
+// IsFailedPrecondition returns true if an operation could not proceed due to
+// the lack of a particular condition
+func IsFailedPrecondition(err error) bool {
+	return errors.Is(err, ErrFailedPrecondition)
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+	return errors.Is(err, ErrUnavailable)
+}
+
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+	return errors.Is(err, ErrNotImplemented)
+}
+
+// IsCanceled returns true if the error is due to `context.Canceled`.
+func IsCanceled(err error) bool {
+	return errors.Is(err, context.Canceled)
+}
+
+// IsDeadlineExceeded returns true if the error is due to
+// `context.DeadlineExceeded`.
+func IsDeadlineExceeded(err error) bool { + return errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/containerd/errdefs/grpc.go b/vendor/github.com/containerd/errdefs/grpc.go new file mode 100644 index 00000000..7a9b33e0 --- /dev/null +++ b/vendor/github.com/containerd/errdefs/grpc.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "fmt" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack. +func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. 
+// +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = fmt.Errorf("%s: %w", msg, cls) + } else { + err = cls + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. +func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/containers/common/internal/attributedstring/slice.go b/vendor/github.com/containers/common/internal/attributedstring/slice.go index ad4acc5e..298d468d 100644 --- a/vendor/github.com/containers/common/internal/attributedstring/slice.go +++ b/vendor/github.com/containers/common/internal/attributedstring/slice.go @@ -42,8 +42,8 @@ func (a *Slice) Set(values []string) { } // UnmarshalTOML is the custom unmarshal method for Slice. -func (a *Slice) UnmarshalTOML(data interface{}) error { - iFaceSlice, ok := data.([]interface{}) +func (a *Slice) UnmarshalTOML(data any) error { + iFaceSlice, ok := data.([]any) if !ok { return fmt.Errorf("unable to cast to interface array: %v", data) } @@ -53,7 +53,7 @@ func (a *Slice) UnmarshalTOML(data interface{}) error { switch val := x.(type) { case string: // Strings are directly appended to the slice. loadedStrings = append(loadedStrings, val) - case map[string]interface{}: // The attribute struct is represented as a map. + case map[string]any: // The attribute struct is represented as a map. for k, v := range val { // Iterate over all _supported_ keys. switch k { case "append": @@ -81,16 +81,15 @@ func (a *Slice) UnmarshalTOML(data interface{}) error { // MarshalTOML is the custom marshal method for Slice. 
func (a *Slice) MarshalTOML() ([]byte, error) { - iFaceSlice := make([]interface{}, 0, len(a.Values)) + iFaceSlice := make([]any, 0, len(a.Values)) for _, x := range a.Values { iFaceSlice = append(iFaceSlice, x) } if a.Attributes.Append != nil { - Attributes := make(map[string]any) - Attributes["append"] = *a.Attributes.Append - iFaceSlice = append(iFaceSlice, Attributes) + attributes := map[string]any{"append": *a.Attributes.Append} + iFaceSlice = append(iFaceSlice, attributes) } buf := new(bytes.Buffer) diff --git a/vendor/github.com/containers/common/internal/deepcopy.go b/vendor/github.com/containers/common/internal/deepcopy.go new file mode 100644 index 00000000..157f6ee4 --- /dev/null +++ b/vendor/github.com/containers/common/internal/deepcopy.go @@ -0,0 +1,29 @@ +package internal + +import ( + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) + +// DeepCopyDescriptor copies a Descriptor, deeply copying its contents +func DeepCopyDescriptor(original *v1.Descriptor) *v1.Descriptor { + tmp := *original + if original.URLs != nil { + tmp.URLs = slices.Clone(original.URLs) + } + if original.Annotations != nil { + tmp.Annotations = maps.Clone(original.Annotations) + } + if original.Data != nil { + tmp.Data = slices.Clone(original.Data) + } + if original.Platform != nil { + tmpPlatform := *original.Platform + if original.Platform.OSFeatures != nil { + tmpPlatform.OSFeatures = slices.Clone(original.Platform.OSFeatures) + } + tmp.Platform = &tmpPlatform + } + return &tmp +} diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index d6acc732..ebd76b38 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -364,11 +363,13 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere defer cancel() defer timer.Stop() - fmt.Fprintf(c.imageCopyOptions.ReportWriter, - "Pulling image %s inside systemd: setting pull timeout to %s\n", - source.StringWithinTransport(), - time.Duration(numExtensions)*extension, - ) + if c.imageCopyOptions.ReportWriter != nil { + fmt.Fprintf(c.imageCopyOptions.ReportWriter, + "Pulling image %s inside systemd: setting pull timeout to %s\n", + source.StringWithinTransport(), + time.Duration(numExtensions)*extension, + ) + } // From `man systemd.service(5)`: // @@ -431,12 +432,12 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere // Sanity checks for Buildah. 
if sourceInsecure != nil && *sourceInsecure { if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse { - return nil, fmt.Errorf("can't require tls verification on an insecured registry") + return nil, errors.New("can't require tls verification on an insecured registry") } } if destinationInsecure != nil && *destinationInsecure { if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse { - return nil, fmt.Errorf("can't require tls verification on an insecured registry") + return nil, errors.New("can't require tls verification on an insecured registry") } } @@ -516,8 +517,8 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err return nil, fmt.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources) } - for _, inseureDomain := range sources.InsecureRegistries { - if inseureDomain == reference.Domain(dref) { + for _, insecureDomain := range sources.InsecureRegistries { + if insecureDomain == reference.Domain(dref) { insecure := true return &insecure, nil } diff --git a/vendor/github.com/containers/common/libimage/define/manifests.go b/vendor/github.com/containers/common/libimage/define/manifests.go index 1e02984b..c59a58f7 100644 --- a/vendor/github.com/containers/common/libimage/define/manifests.go +++ b/vendor/github.com/containers/common/libimage/define/manifests.go @@ -4,24 +4,28 @@ import ( "github.com/containers/image/v5/manifest" ) -// ManifestListDescriptor references a platform-specific manifest. -// Contains exclusive field like `annotations` which is only present in -// OCI spec and not in docker image spec. +// ManifestListDescriptor describes a manifest that is mentioned in an +// image index or manifest list. +// Contains a subset of the fields which are present in both the OCI spec and +// the Docker spec, along with some which are unique to one or the other. type ManifestListDescriptor struct { manifest.Schema2Descriptor - Platform manifest.Schema2PlatformSpec `json:"platform"` - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` + Platform manifest.Schema2PlatformSpec `json:"platform,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + ArtifactType string `json:"artifactType,omitempty"` + Data []byte `json:"data,omitempty"` + Files []string `json:"files,omitempty"` } // ManifestListData is a list of platform-specific manifests, specifically used to // generate output struct for `podman manifest inspect`. Reason for maintaining and -// having this type is to ensure we can have a common type which contains exclusive +// having this type is to ensure we can have a single type which contains exclusive // fields from both Docker manifest format and OCI manifest format. type ManifestListData struct { SchemaVersion int `json:"schemaVersion"` MediaType string `json:"mediaType"` + ArtifactType string `json:"artifactType,omitempty"` Manifests []ManifestListDescriptor `json:"manifests"` - // Annotations contains arbitrary metadata for the image index. 
-	Annotations map[string]string            `json:"annotations,omitempty"`
+	Subject     *ManifestListDescriptor `json:"subject,omitempty"`
+	Annotations map[string]string       `json:"annotations,omitempty"`
 }
diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go
index 765b0df8..6264b25e 100644
--- a/vendor/github.com/containers/common/libimage/disk_usage.go
+++ b/vendor/github.com/containers/common/libimage/disk_usage.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package libimage
 
@@ -37,7 +36,7 @@ func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, int64, error
 		return nil, -1, err
 	}
 
-	layerTree, err := r.layerTree(images)
+	layerTree, err := r.layerTree(ctx, images)
 	if err != nil {
 		return nil, -1, err
 	}
@@ -80,7 +79,7 @@ func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, int64, error
 
 // diskUsageForImage returns the disk-usage statistics for the specified image.
 func diskUsageForImage(ctx context.Context, image *Image, tree *layerTree) ([]ImageDiskUsage, error) {
-	if err := image.isCorrupted(""); err != nil {
+	if err := image.isCorrupted(ctx, ""); err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/containers/common/libimage/events.go b/vendor/github.com/containers/common/libimage/events.go
index 5d82efa6..5071d254 100644
--- a/vendor/github.com/containers/common/libimage/events.go
+++ b/vendor/github.com/containers/common/libimage/events.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package libimage
 
@@ -19,6 +18,8 @@ const (
 	EventTypeUnknown EventType = iota
 	// EventTypeImagePull represents an image pull.
 	EventTypeImagePull
+	// EventTypeImagePullError represents a failed image pull.
+	EventTypeImagePullError
 	// EventTypeImagePush represents an image push.
 	EventTypeImagePush
 	// EventTypeImageRemove represents an image removal.
@@ -47,6 +48,8 @@ type Event struct {
 	Time time.Time
 	// Type of the event.
 	Type EventType
+	// Error in case of failure.
+	Error error
 }
 
 // writeEvent writes the specified event to the Runtime's event channel. The
diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go
index b51853af..f0cf2e5b 100644
--- a/vendor/github.com/containers/common/libimage/filters.go
+++ b/vendor/github.com/containers/common/libimage/filters.go
@@ -1,10 +1,10 @@
 //go:build !remote
-// +build !remote
 
 package libimage
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"path"
 	"strconv"
@@ -21,33 +21,28 @@ import (
 // indicates that the image matches the criteria.
 type filterFunc func(*Image) (bool, error)
 
-// Apply the specified filters. At least one filter of each key must apply.
-func (i *Image) applyFilters(filters map[string][]filterFunc) (bool, error) {
-	matches := false
-	for key := range filters { // and
-		matches = false
-		for _, filter := range filters[key] { // or
-			var err error
-			matches, err = filter(i)
+// Apply the specified filters. All filters of each key must apply.
+func (i *Image) applyFilters(ctx context.Context, filters map[string][]filterFunc) (bool, error) {
+	for key := range filters {
+		for _, filter := range filters[key] {
+			matches, err := filter(i)
 			if err != nil {
 				// Some images may have been corrupted in the
 				// meantime, so do an extra check and make the
 				// error non-fatal (see containers/podman/issues/12582).
- if errCorrupted := i.isCorrupted(""); errCorrupted != nil { + if errCorrupted := i.isCorrupted(ctx, ""); errCorrupted != nil { logrus.Errorf(errCorrupted.Error()) return false, nil } return false, err } - if matches { - break + // If any filter within a group doesn't match, return false + if !matches { + return false, nil } } - if !matches { - return false, nil - } } - return matches, nil + return true, nil } // filterImages returns a slice of images which are passing all specified @@ -63,7 +58,7 @@ func (r *Runtime) filterImages(ctx context.Context, images []*Image, options *Li } result := []*Image{} for i := range images { - match, err := images[i].applyFilters(filters) + match, err := images[i].applyFilters(ctx, filters) if err != nil { return nil, err } @@ -84,7 +79,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp var tree *layerTree getTree := func() (*layerTree, error) { if tree == nil { - t, err := r.layerTree(nil) + t, err := r.layerTree(ctx, nil) if err != nil { return nil, err } @@ -93,6 +88,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp return tree, nil } + var wantedReferenceMatches, unwantedReferenceMatches []string filters := map[string][]filterFunc{} duplicate := map[string]string{} for _, f := range options.Filters { @@ -184,7 +180,12 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp filter = filterManifest(ctx, manifest) case "reference": - filter = filterReferences(r, value) + if negate { + unwantedReferenceMatches = append(unwantedReferenceMatches, value) + } else { + wantedReferenceMatches = append(wantedReferenceMatches, value) + } + continue case "until": until, err := r.until(value) @@ -202,6 +203,11 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp filters[key] = append(filters[key], filter) } + // reference filters is a special case as it does an OR for positive matches + // and an AND logic for negative matches + filter := filterReferences(r, wantedReferenceMatches, unwantedReferenceMatches) + filters["reference"] = append(filters["reference"], filter) + return filters, nil } @@ -221,7 +227,7 @@ func (r *Runtime) containers(duplicate map[string]string, key, value string, ext case "false", "true": case "external": if externalFunc == nil { - return fmt.Errorf("libimage error: external containers filter without callback") + return errors.New("libimage error: external containers filter without callback") } default: return fmt.Errorf("unsupported value %q for containers filter", value) @@ -273,55 +279,97 @@ func filterManifest(ctx context.Context, value bool) filterFunc { } } -// filterReferences creates a reference filter for matching the specified value. 
-func filterReferences(r *Runtime, value string) filterFunc { - lookedUp, _, _ := r.LookupImage(value, nil) +// filterReferences creates a reference filter for matching the specified wantedReferenceMatches value (OR logic) +// and for matching the unwantedReferenceMatches values (AND logic) +func filterReferences(r *Runtime, wantedReferenceMatches, unwantedReferenceMatches []string) filterFunc { return func(img *Image) (bool, error) { - if lookedUp != nil { - if lookedUp.ID() == img.ID() { + // Empty reference filters, return true + if len(wantedReferenceMatches) == 0 && len(unwantedReferenceMatches) == 0 { + return true, nil + } + + unwantedMatched := false + // Go through the unwanted matches first + for _, value := range unwantedReferenceMatches { + matches, err := imageMatchesReferenceFilter(r, img, value) + if err != nil { + return false, err + } + if matches { + unwantedMatched = true + } + } + + // If there are no wanted match filters, then return false for the image + // that matched the unwanted value otherwise return true + if len(wantedReferenceMatches) == 0 { + return !unwantedMatched, nil + } + + // Go through the wanted matches + // If an image matches the wanted filter but it also matches the unwanted + // filter, don't add it to the output + for _, value := range wantedReferenceMatches { + matches, err := imageMatchesReferenceFilter(r, img, value) + if err != nil { + return false, err + } + if matches && !unwantedMatched { return true, nil } } - refs, err := img.NamesReferences() - if err != nil { - return false, err + return false, nil + } +} + +// imageMatchesReferenceFilter returns true if an image matches the filter value given +func imageMatchesReferenceFilter(r *Runtime, img *Image, value string) (bool, error) { + lookedUp, _, _ := r.LookupImage(value, nil) + if lookedUp != nil { + if lookedUp.ID() == img.ID() { + return true, nil } + } - for _, ref := range refs { - refString := ref.String() // FQN with tag/digest - candidates := []string{refString} + refs, err := img.NamesReferences() + if err != nil { + return false, err + } - // Split the reference into 3 components (twice if digested/tagged): - // 1) Fully-qualified reference - // 2) Without domain - // 3) Without domain and path - if named, isNamed := ref.(reference.Named); isNamed { + for _, ref := range refs { + refString := ref.String() // FQN with tag/digest + candidates := []string{refString} + + // Split the reference into 3 components (twice if digested/tagged): + // 1) Fully-qualified reference + // 2) Without domain + // 3) Without domain and path + if named, isNamed := ref.(reference.Named); isNamed { + candidates = append(candidates, + reference.Path(named), // path/name without tag/digest (Path() removes it) + refString[strings.LastIndex(refString, "/")+1:]) // name with tag/digest + + trimmedString := reference.TrimNamed(named).String() + if refString != trimmedString { + tagOrDigest := refString[len(trimmedString):] candidates = append(candidates, - reference.Path(named), // path/name without tag/digest (Path() removes it) - refString[strings.LastIndex(refString, "/")+1:]) // name with tag/digest - - trimmedString := reference.TrimNamed(named).String() - if refString != trimmedString { - tagOrDigest := refString[len(trimmedString):] - candidates = append(candidates, - trimmedString, // FQN without tag/digest - reference.Path(named)+tagOrDigest, // path/name with tag/digest - trimmedString[strings.LastIndex(trimmedString, "/")+1:]) // name without tag/digest - } + trimmedString, // FQN without 
tag/digest + reference.Path(named)+tagOrDigest, // path/name with tag/digest + trimmedString[strings.LastIndex(trimmedString, "/")+1:]) // name without tag/digest } + } - for _, candidate := range candidates { - // path.Match() is also used by Docker's reference.FamiliarMatch(). - matched, _ := path.Match(value, candidate) - if matched { - return true, nil - } + for _, candidate := range candidates { + // path.Match() is also used by Docker's reference.FamiliarMatch(). + matched, _ := path.Match(value, candidate) + if matched { + return true, nil } } - - return false, nil } + + return false, nil } // filterLabel creates a label for matching the specified value. diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go index ccd81096..56f84e37 100644 --- a/vendor/github.com/containers/common/libimage/history.go +++ b/vendor/github.com/containers/common/libimage/history.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -26,7 +25,7 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) { return nil, err } - layerTree, err := i.runtime.layerTree(nil) + layerTree, err := i.runtime.layerTree(ctx, nil) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index 4d106d42..9cc77cdb 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -67,7 +66,7 @@ type Image struct { } } -// reload the image and pessimitically clear all cached data. +// reload the image and pessimistically clear all cached data. func (i *Image) reload() error { logrus.Tracef("Reloading image %s", i.ID()) img, err := i.runtime.store.Image(i.ID()) @@ -85,7 +84,7 @@ func (i *Image) reload() error { } // isCorrupted returns an error if the image may be corrupted. -func (i *Image) isCorrupted(name string) error { +func (i *Image) isCorrupted(ctx context.Context, name string) error { // If it's a manifest list, we're good for now. if _, err := i.getManifestList(); err == nil { return nil @@ -96,7 +95,7 @@ func (i *Image) isCorrupted(name string) error { return err } - img, err := ref.NewImage(context.Background(), nil) + img, err := ref.NewImage(ctx, nil) if err != nil { if name == "" { name = i.ID()[:12] @@ -258,7 +257,7 @@ func (i *Image) TopLayer() string { // Parent returns the parent image or nil if there is none func (i *Image) Parent(ctx context.Context) (*Image, error) { - tree, err := i.runtime.layerTree(nil) + tree, err := i.runtime.layerTree(ctx, nil) if err != nil { return nil, err } @@ -292,7 +291,7 @@ func (i *Image) Children(ctx context.Context) ([]*Image, error) { // created for this invocation only. func (i *Image) getChildren(ctx context.Context, all bool, tree *layerTree) ([]*Image, error) { if tree == nil { - t, err := i.runtime.layerTree(nil) + t, err := i.runtime.layerTree(ctx, nil) if err != nil { return nil, err } @@ -611,7 +610,7 @@ func (i *Image) Untag(name string) error { } // FIXME: this is breaking Podman CI but must be re-enabled once - // c/storage supports alterting the digests of an image. Then, + // c/storage supports altering the digests of an image. Then, // Podman will do the right thing. // // !!! Also make sure to re-enable the tests !!! 
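A pattern worth calling out across these libimage hunks: helpers such as layerTree() and isCorrupted() now take the caller's context.Context rather than creating context.Background() internally, so timeouts and cancellation from the caller's request path actually reach the storage calls. A minimal sketch of the idea, with loadLayers as a hypothetical stand-in for the vendored helpers:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // loadLayers stands in for helpers like layerTree(): it accepts the
    // caller's ctx instead of minting context.Background() itself, so
    // cancellation and deadlines propagate all the way down.
    func loadLayers(ctx context.Context) ([]string, error) {
    	select {
    	case <-ctx.Done():
    		// Caller cancelled or timed out; abort early.
    		return nil, ctx.Err()
    	case <-time.After(10 * time.Millisecond):
    		return []string{"layer-a", "layer-b"}, nil
    	}
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()
    	layers, err := loadLayers(ctx)
    	fmt.Println(layers, err)
    }

The image_tree.go hunk further down is the one place that still passes context.Background(), because Tree() itself has no ctx parameter to forward.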
@@ -1031,7 +1030,7 @@ func getImageID(ctx context.Context, src types.ImageReference, sys *types.System // - 2) a bool indicating whether architecture, os or variant were set (some callers need that to decide whether they need to throw an error) // - 3) a fatal error that occurred prior to check for matches (e.g., storage errors etc.) func (i *Image) matchesPlatform(ctx context.Context, os, arch, variant string) (error, bool, error) { - if err := i.isCorrupted(""); err != nil { + if err := i.isCorrupted(ctx, ""); err != nil { return err, false, nil } inspectInfo, err := i.inspectInfo(ctx) diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go index 9f5841fe..cd4ed3c4 100644 --- a/vendor/github.com/containers/common/libimage/image_config.go +++ b/vendor/github.com/containers/common/libimage/image_config.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libimage/image_tree.go b/vendor/github.com/containers/common/libimage/image_tree.go index 8143d377..8b9d1f4c 100644 --- a/vendor/github.com/containers/common/libimage/image_tree.go +++ b/vendor/github.com/containers/common/libimage/image_tree.go @@ -1,9 +1,9 @@ //go:build !remote -// +build !remote package libimage import ( + "context" "fmt" "strings" @@ -38,7 +38,7 @@ func (i *Image) Tree(traverseChildren bool) (string, error) { fmt.Fprintf(sb, "No Image Layers") } - layerTree, err := i.runtime.layerTree(nil) + layerTree, err := i.runtime.layerTree(context.Background(), nil) if err != nil { return "", err } @@ -53,7 +53,7 @@ func (i *Image) Tree(traverseChildren bool) (string, error) { return tree.Print(), nil } - // Walk all layers of the image and assemlbe their data. Note that the + // Walk all layers of the image and assemble their data. Note that the // tree is constructed in reverse order to remain backwards compatible // with Podman. contents := []string{} diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index 5519f02b..552c48ea 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go index 1003b648..0db94708 100644 --- a/vendor/github.com/containers/common/libimage/inspect.go +++ b/vendor/github.com/containers/common/libimage/inspect.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -180,22 +179,26 @@ func (i *Image) Inspect(ctx context.Context, options *InspectOptions) (*ImageDat } // Docker image - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema2MediaType: + case manifest.DockerV2Schema2MediaType: rawConfig, err := i.rawConfigBlob(ctx) if err != nil { return nil, err } - var dockerManifest manifest.Schema2V1Image - if err := json.Unmarshal(rawConfig, &dockerManifest); err != nil { + var dockerConfig manifest.Schema2V1Image + if err := json.Unmarshal(rawConfig, &dockerConfig); err != nil { return nil, err } - data.Comment = dockerManifest.Comment + data.Comment = dockerConfig.Comment // NOTE: Health checks may be listed in the container config or // the config. 
- data.HealthCheck = dockerManifest.ContainerConfig.Healthcheck - if data.HealthCheck == nil && dockerManifest.Config != nil { - data.HealthCheck = dockerManifest.Config.Healthcheck + data.HealthCheck = dockerConfig.ContainerConfig.Healthcheck + if data.HealthCheck == nil && dockerConfig.Config != nil { + data.HealthCheck = dockerConfig.Config.Healthcheck } + + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: + // There seem to be at least _some_ images with .Healthcheck set in schema1 (possibly just as an artifact + // of testing format conversion?), so this could plausibly read these values. } if data.Annotations == nil { diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go index 71eafb0e..66829c46 100644 --- a/vendor/github.com/containers/common/libimage/layer_tree.go +++ b/vendor/github.com/containers/common/libimage/layer_tree.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -92,14 +91,14 @@ func (l *layerNode) repoTags() ([]string, error) { // layerTree extracts a layerTree from the layers in the local storage and // relates them to the specified images. -func (r *Runtime) layerTree(images []*Image) (*layerTree, error) { +func (r *Runtime) layerTree(ctx context.Context, images []*Image) (*layerTree, error) { layers, err := r.store.Layers() if err != nil { return nil, err } if images == nil { - images, err = r.ListImages(context.Background(), nil, nil) + images, err = r.ListImages(ctx, nil, nil) if err != nil { return nil, err } @@ -150,7 +149,9 @@ func (t *layerTree) layersOf(image *Image) []*storage.Layer { var layers []*storage.Layer node := t.node(image.TopLayer()) for node != nil { - layers = append(layers, node.layer) + if node.layer != nil { + layers = append(layers, node.layer) + } node = node.parent } return layers diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go index 36283a99..2be6a1b6 100644 --- a/vendor/github.com/containers/common/libimage/load.go +++ b/vendor/github.com/containers/common/libimage/load.go @@ -1,10 +1,10 @@ //go:build !remote -// +build !remote package libimage import ( "context" + "errors" "fmt" "os" "time" @@ -92,7 +92,7 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( // Give a decent error message if nothing above worked. 
// we want the colon here for the multiline error //nolint:revive - loadError := fmt.Errorf("payload does not match any of the supported image formats:") + loadError := errors.New("payload does not match any of the supported image formats:") for _, err := range loadErrors { loadError = fmt.Errorf("%v\n * %v", loadError, err) } diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index c36bfda9..d7ee5e6b 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -19,6 +18,9 @@ import ( "github.com/containers/storage" structcopier "github.com/jinzhu/copier" "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) // NOTE: the abstractions and APIs here are a first step to further merge @@ -222,22 +224,78 @@ func (i *Image) IsManifestList(ctx context.Context) (bool, error) { // Inspect returns a dockerized version of the manifest list. func (m *ManifestList) Inspect() (*define.ManifestListData, error) { inspectList := define.ManifestListData{} + // Copy the fields from the Docker-format version of the list. dockerFormat := m.list.Docker() err := structcopier.Copy(&inspectList, &dockerFormat) if err != nil { return &inspectList, err } - // Get missing annotation field from OCIv1 Spec - // and populate inspect data. + // Get OCI-specific fields from the OCIv1-format version of the list + // and copy them to the inspect data. ociFormat := m.list.OCIv1() + inspectList.ArtifactType = ociFormat.ArtifactType inspectList.Annotations = ociFormat.Annotations for i, manifest := range ociFormat.Manifests { inspectList.Manifests[i].Annotations = manifest.Annotations + inspectList.Manifests[i].ArtifactType = manifest.ArtifactType + if manifest.URLs != nil { + inspectList.Manifests[i].URLs = slices.Clone(manifest.URLs) + } + inspectList.Manifests[i].Data = manifest.Data + inspectList.Manifests[i].Files, err = m.list.Files(manifest.Digest) + if err != nil { + return &inspectList, err + } + } + if ociFormat.Subject != nil { + platform := ociFormat.Subject.Platform + if platform == nil { + platform = &imgspecv1.Platform{} + } + var osFeatures []string + if platform.OSFeatures != nil { + osFeatures = slices.Clone(platform.OSFeatures) + } + inspectList.Subject = &define.ManifestListDescriptor{ + Platform: manifest.Schema2PlatformSpec{ + OS: platform.OS, + Architecture: platform.Architecture, + OSVersion: platform.OSVersion, + Variant: platform.Variant, + OSFeatures: osFeatures, + }, + Schema2Descriptor: manifest.Schema2Descriptor{ + MediaType: ociFormat.Subject.MediaType, + Digest: ociFormat.Subject.Digest, + Size: ociFormat.Subject.Size, + URLs: ociFormat.Subject.URLs, + }, + Annotations: ociFormat.Subject.Annotations, + ArtifactType: ociFormat.Subject.ArtifactType, + Data: ociFormat.Subject.Data, + } + } + // Set MediaType to mirror the value we'd use when saving the list + // using defaults, instead of forcing it to one or the other by + // using the value from one version or the other that we explicitly + // requested above. 
+	serialized, err := m.list.Serialize("")
+	if err != nil {
+		return &inspectList, err
+	}
+	var typed struct {
+		MediaType string `json:"mediaType,omitempty"`
+	}
+	if err := json.Unmarshal(serialized, &typed); err != nil {
+		return &inspectList, err
+	}
+	if typed.MediaType != "" {
+		inspectList.MediaType = typed.MediaType
+	}
 	return &inspectList, nil
 }
 
-// Options for adding a manifest list.
+// Options for adding an image or artifact to a manifest list.
 type ManifestListAddOptions struct {
 	// Add all images to the list if the to-be-added image itself is a
 	// manifest list.
@@ -314,7 +372,105 @@ func (m *ManifestList) Add(ctx context.Context, name string, options *ManifestLi
 	return newDigest, nil
 }
 
-// Options for annotationg a manifest list.
+// Options for creating an artifact manifest for one or more files and adding
+// the artifact manifest to a manifest list.
+type ManifestListAddArtifactOptions struct {
+	// The artifactType to set in the artifact manifest.
+	Type *string `json:"artifact_type"`
+	// The mediaType to set in the config.MediaType field in the artifact manifest.
+	ConfigType string `json:"artifact_config_type"`
+	// Content to point to from the config field in the artifact manifest.
+	Config string `json:"artifact_config"`
+	// The mediaType to set in the layer descriptors in the artifact manifest.
+	LayerType string `json:"artifact_layer_type"`
+	// Whether or not to suppress the org.opencontainers.image.title annotation in layer descriptors.
+	ExcludeTitles bool `json:"exclude_layer_titles"`
+	// Annotations to set in the artifact manifest.
+	Annotations map[string]string `json:"annotations"`
+	// Subject to set in the artifact manifest.
+	Subject string `json:"subject"`
+}
+
+// AddArtifact creates an artifact manifest describing the given files, adds it
+// to the manifest list, and returns the digest of the added instance.
+func (m *ManifestList) AddArtifact(ctx context.Context, options *ManifestListAddArtifactOptions, files ...string) (digest.Digest, error) {
+	if options == nil {
+		options = &ManifestListAddArtifactOptions{}
+	}
+	opts := manifests.AddArtifactOptions{
+		ManifestArtifactType: options.Type,
+		Annotations:          maps.Clone(options.Annotations),
+		ExcludeTitles:        options.ExcludeTitles,
+	}
+	if options.ConfigType != "" {
+		opts.ConfigDescriptor = &imgspecv1.Descriptor{
+			MediaType: options.ConfigType,
+			Digest:    imgspecv1.DescriptorEmptyJSON.Digest,
+			Size:      imgspecv1.DescriptorEmptyJSON.Size,
+			Data:      slices.Clone(imgspecv1.DescriptorEmptyJSON.Data),
+		}
+	}
+	if options.Config != "" {
+		if opts.ConfigDescriptor == nil {
+			opts.ConfigDescriptor = &imgspecv1.Descriptor{
+				MediaType: imgspecv1.MediaTypeImageConfig,
+			}
+		}
+		opts.ConfigDescriptor.Digest = digest.FromString(options.Config)
+		opts.ConfigDescriptor.Size = int64(len(options.Config))
+		opts.ConfigDescriptor.Data = slices.Clone([]byte(options.Config))
+	}
+	if opts.ConfigDescriptor == nil {
+		empty := imgspecv1.DescriptorEmptyJSON
+		opts.ConfigDescriptor = &empty
+	}
+	if options.LayerType != "" {
+		opts.LayerMediaType = &options.LayerType
+	}
+	if options.Subject != "" {
+		ref, err := alltransports.ParseImageName(options.Subject)
+		if err != nil {
+			withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), options.Subject)
+			ref, err = alltransports.ParseImageName(withDocker)
+			if err != nil {
+				image, _, err := m.image.runtime.LookupImage(options.Subject, &LookupImageOptions{ManifestList: true})
+				if err != nil {
+					return "", fmt.Errorf("locating subject for artifact manifest: %w", err)
+				}
+				ref = image.storageReference
+			}
+		}
+		opts.SubjectReference = ref
+	}
+
+	// Lock the image record where this list lives.
+	locker, err := manifests.LockerForImage(m.image.runtime.store, m.ID())
+	if err != nil {
+		return "", err
+	}
+	locker.Lock()
+	defer locker.Unlock()
+
+	systemContext := m.image.runtime.systemContextCopy()
+
+	// Make sure to reload the image from the containers storage to fetch
+	// the latest data (e.g., new or deleted digests).
+	if err := m.reload(); err != nil {
+		return "", err
+	}
+	newDigest, err := m.list.AddArtifact(ctx, systemContext, opts, files...)
+	if err != nil {
+		return "", err
+	}
+
+	// Write the changes to disk.
+	if err := m.saveAndReload(); err != nil {
+		return "", err
+	}
+	return newDigest, nil
+}
+
+// Options for annotating a manifest list.
 type ManifestListAnnotateOptions struct {
 	// Add the specified annotations to the added image.
 	Annotations map[string]string
@@ -330,10 +486,16 @@ type ManifestListAnnotateOptions struct {
 	OSVersion string
 	// Add the specified variant to the added image.
 	Variant string
+	// Add the specified annotations to the index itself.
+	IndexAnnotations map[string]string
+	// Set the subject to which the index refers.
+	Subject string
 }
 
 // Annotate an image instance specified by `d` in the manifest list.
func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAnnotateOptions) error { + ctx := context.Background() + if options == nil { return nil } @@ -373,6 +535,54 @@ func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAn return err } } + if len(options.IndexAnnotations) > 0 { + if err := m.list.SetAnnotations(nil, options.IndexAnnotations); err != nil { + return err + } + } + if options.Subject != "" { + ref, err := alltransports.ParseImageName(options.Subject) + if err != nil { + withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), options.Subject) + ref, err = alltransports.ParseImageName(withDocker) + if err != nil { + image, _, err := m.image.runtime.LookupImage(options.Subject, &LookupImageOptions{ManifestList: true}) + if err != nil { + return fmt.Errorf("locating subject for image index: %w", err) + } + ref = image.storageReference + } + } + src, err := ref.NewImageSource(ctx, &m.image.runtime.systemContext) + if err != nil { + return err + } + defer src.Close() + subjectManifestBytes, subjectManifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return err + } + subjectManifestDigest, err := manifest.Digest(subjectManifestBytes) + if err != nil { + return err + } + var subjectArtifactType string + if !manifest.MIMETypeIsMultiImage(subjectManifestType) { + var subjectManifest imgspecv1.Manifest + if json.Unmarshal(subjectManifestBytes, &subjectManifest) == nil { + subjectArtifactType = subjectManifest.ArtifactType + } + } + descriptor := &imgspecv1.Descriptor{ + MediaType: subjectManifestType, + ArtifactType: subjectArtifactType, + Digest: subjectManifestDigest, + Size: int64(len(subjectManifestBytes)), + } + if err := m.list.SetSubject(descriptor); err != nil { + return err + } + } // Write the changes to disk. 
return m.saveAndReload() diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index d28ac87b..78349b50 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -1,13 +1,21 @@ package manifests import ( + "bytes" "context" "encoding/json" "errors" "fmt" "io" + "mime" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" "time" + "github.com/containers/common/internal" "github.com/containers/common/pkg/manifests" "github.com/containers/common/pkg/retry" "github.com/containers/common/pkg/supplemented" @@ -15,6 +23,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" "github.com/containers/image/v5/manifest" + ocilayout "github.com/containers/image/v5/oci/layout" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/signature/signer" @@ -23,17 +32,25 @@ import ( "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" "github.com/containers/storage" + "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/lockfile" digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) const ( defaultMaxRetries = 3 ) -const instancesData = "instances.json" +const ( + instancesData = "instances.json" + artifactsData = "artifacts.json" + pushingArtifactsSubdirectory = "referenced-artifacts" +) // LookupReferenceFunc return an image reference based on the specified one. // The returned reference can return custom ImageSource or ImageDestination @@ -45,9 +62,19 @@ type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, e // for a List that has not yet been saved to an image. var ErrListImageUnknown = errors.New("unable to determine which image holds the manifest list") +type artifactsDetails struct { + Manifests map[digest.Digest]string `json:"manifests,omitempty"` // artifact (and other?) manifest digests → manifest contents + Files map[digest.Digest][]string `json:"files,omitempty"` // artifact (and other?) manifest digests → file paths (mainly for display) + Configs map[digest.Digest]digest.Digest `json:"config,omitempty"` // artifact (and other?) manifest digests → referenced config digests + Layers map[digest.Digest][]digest.Digest `json:"layers,omitempty"` // artifact (and other?) manifest digests → referenced layer digests + Detached map[digest.Digest]string `json:"detached,omitempty"` // "config" and "layer" (and other?) digests in (usually artifact) manifests → file paths + Blobs map[digest.Digest][]byte `json:"blobs,omitempty"` // "config" and "layer" (and other?) 
manifest digests → inlined blob contents +} + type list struct { manifests.List - instances map[digest.Digest]string + instances map[digest.Digest]string // instance manifest digests → image references + artifacts artifactsDetails } // List is a manifest list or image index, either created using Create(), or @@ -58,6 +85,9 @@ type List interface { Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) Push(ctx context.Context, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error) + AddArtifact(ctx context.Context, sys *types.SystemContext, options AddArtifactOptions, files ...string) (digest.Digest, error) + InstanceByFile(file string) (digest.Digest, error) + Files(instanceDigest digest.Digest) ([]string, error) } // PushOptions includes various settings which are needed for pushing the @@ -93,6 +123,14 @@ func Create() List { return &list{ List: manifests.Create(), instances: make(map[digest.Digest]string), + artifacts: artifactsDetails{ + Manifests: make(map[digest.Digest]string), + Files: make(map[digest.Digest][]string), + Configs: make(map[digest.Digest]digest.Digest), + Layers: make(map[digest.Digest][]digest.Digest), + Detached: make(map[digest.Digest]string), + Blobs: make(map[digest.Digest][]byte), + }, } } @@ -115,6 +153,14 @@ func LoadFromImage(store storage.Store, image string) (string, List, error) { list := &list{ List: manifestList, instances: make(map[digest.Digest]string), + artifacts: artifactsDetails{ + Manifests: make(map[digest.Digest]string), + Files: make(map[digest.Digest][]string), + Configs: make(map[digest.Digest]digest.Digest), + Layers: make(map[digest.Digest][]digest.Digest), + Detached: make(map[digest.Digest]string), + Blobs: make(map[digest.Digest][]byte), + }, } instancesBytes, err := store.ImageBigData(img.ID, instancesData) if err != nil { @@ -123,8 +169,18 @@ func LoadFromImage(store storage.Store, image string) (string, List, error) { if err := json.Unmarshal(instancesBytes, &list.instances); err != nil { return "", nil, fmt.Errorf("decoding instance list for image %q: %w", image, err) } + artifactsBytes, err := store.ImageBigData(img.ID, artifactsData) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return "", nil, fmt.Errorf("locating image %q for loading instance list: %w", image, err) + } + artifactsBytes = []byte("{}") + } + if err := json.Unmarshal(artifactsBytes, &list.artifacts); err != nil { + return "", nil, fmt.Errorf("decoding artifact list for image %q: %w", image, err) + } list.instances[""] = img.ID - return img.ID, list, err + return img.ID, list, nil } // SaveToImage saves the manifest list or image index as the manifest of an @@ -140,41 +196,79 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string, if err != nil { return "", err } - img, err := store.CreateImage(imageID, names, "", "", &storage.ImageOptions{}) - if err == nil || errors.Is(err, storage.ErrDuplicateID) { - created := (err == nil) - if created { - imageID = img.ID - l.instances[""] = img.ID - } - err := store.SetImageBigData(imageID, storage.ImageDigestManifestBigDataNamePrefix, manifestBytes, manifest.Digest) - if err != nil { - if created { - if _, err2 := store.DeleteImage(img.ID, true); err2 != nil { - logrus.Errorf("Deleting image %q after failing to save manifest for it", img.ID) + artifactsBytes, err := 
json.Marshal(&l.artifacts) + if err != nil { + return "", err + } + manifestDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return "", err + } + imageOptions := &storage.ImageOptions{ + BigData: []storage.ImageBigDataOption{ + {Key: storage.ImageDigestManifestBigDataNamePrefix, Data: manifestBytes, Digest: manifestDigest}, + {Key: instancesData, Data: instancesBytes}, + {Key: artifactsData, Data: artifactsBytes}, + }, + } + img, err := store.CreateImage(imageID, names, "", "", imageOptions) + if err != nil { + if imageID != "" && errors.Is(err, storage.ErrDuplicateID) { + for _, bd := range imageOptions.BigData { + digester := manifest.Digest + if !strings.HasPrefix(bd.Key, storage.ImageDigestManifestBigDataNamePrefix) { + digester = nil + } + err := store.SetImageBigData(imageID, bd.Key, bd.Data, digester) + if err != nil { + return "", fmt.Errorf("saving manifest list to image %q: %w", imageID, err) } } - return "", fmt.Errorf("saving manifest list to image %q: %w", imageID, err) + return imageID, nil } - err = store.SetImageBigData(imageID, instancesData, instancesBytes, nil) - if err != nil { - if created { - if _, err2 := store.DeleteImage(img.ID, true); err2 != nil { - logrus.Errorf("Deleting image %q after failing to save instance locations for it", img.ID) - } + return "", err + } + l.instances[""] = img.ID + return img.ID, nil +} + +// Files returns the list of files associated with a particular artifact +// instance in the image index, primarily for display purposes. +func (l *list) Files(instanceDigest digest.Digest) ([]string, error) { + filesList, ok := l.artifacts.Files[instanceDigest] + if ok { + return slices.Clone(filesList), nil + } + return nil, nil +} + +// instanceByFile returns the instanceDigest of the first manifest in the index +// which refers to the named file. The name will be passed to filepath.Abs() +// before searching for an instance which references it. +func (l *list) InstanceByFile(file string) (digest.Digest, error) { + if parsedDigest, err := digest.Parse(file); err == nil { + // nice try, but that's already a digest! + return parsedDigest, nil + } + abs, err := filepath.Abs(file) + if err != nil { + return "", err + } + for instanceDigest, files := range l.artifacts.Files { + for _, file := range files { + if file == abs { + return instanceDigest, nil } - return "", fmt.Errorf("saving instance list to image %q: %w", imageID, err) } - return imageID, nil } - return "", fmt.Errorf("creating image to hold manifest list: %w", err) + return "", os.ErrNotExist } // Reference returns an image reference for the composite image being built // in the list, or an error if the list has never been saved to a local image. 
func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) { if l.instances[""] == "" { - return nil, fmt.Errorf("building reference to list: %w", ErrListImageUnknown) + return nil, fmt.Errorf("building reference to list, appears to have not been saved first: %w", ErrListImageUnknown) } s, err := is.Transport.ParseStoreReference(store, l.instances[""]) if err != nil { @@ -191,11 +285,100 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in } case cp.CopySpecificImages: for instance := range l.instances { - for _, allowed := range instances { - if instance == allowed { - whichInstances = append(whichInstances, instance) + if slices.Contains(instances, instance) { + whichInstances = append(whichInstances, instance) + } + } + } + if len(l.artifacts.Manifests) > 0 { + img, err := is.Transport.GetImage(s) + if err != nil { + return nil, fmt.Errorf("locating image %s: %w", transports.ImageName(s), err) + } + imgDirectory, err := store.ImageDirectory(img.ID) + if err != nil { + return nil, fmt.Errorf("locating per-image directory for %s: %w", img.ID, err) + } + tmp, err := os.MkdirTemp(imgDirectory, pushingArtifactsSubdirectory) + if err != nil { + return nil, err + } + subdir := 0 + for artifactManifestDigest, contents := range l.artifacts.Manifests { + // create the blobs directory + subdir++ + tmp := filepath.Join(tmp, strconv.Itoa(subdir)) + blobsDir := filepath.Join(tmp, "blobs", artifactManifestDigest.Algorithm().String()) + if err := os.MkdirAll(blobsDir, 0o700); err != nil { + return nil, fmt.Errorf("creating directory for blobs: %w", err) + } + // write the artifact manifest + if err := os.WriteFile(filepath.Join(blobsDir, artifactManifestDigest.Encoded()), []byte(contents), 0o644); err != nil { + return nil, fmt.Errorf("writing artifact manifest as blob: %w", err) + } + // symlink all of the referenced files and write the inlined blobs into the blobs directory + var referencedBlobDigests []digest.Digest + var symlinkedFiles []string + if referencedConfigDigest, ok := l.artifacts.Configs[artifactManifestDigest]; ok { + referencedBlobDigests = append(referencedBlobDigests, referencedConfigDigest) + } + referencedBlobDigests = append(referencedBlobDigests, l.artifacts.Layers[artifactManifestDigest]...) 
+ for _, referencedBlobDigest := range referencedBlobDigests { + referencedFile, knownFile := l.artifacts.Detached[referencedBlobDigest] + referencedBlob, knownBlob := l.artifacts.Blobs[referencedBlobDigest] + if !knownFile && !knownBlob { + return nil, fmt.Errorf(`internal error: no file or blob with artifact "config" or "layer" digest %q recorded`, referencedBlobDigest) + } + expectedLayerBlobPath := filepath.Join(blobsDir, referencedBlobDigest.Encoded()) + if _, err := os.Lstat(expectedLayerBlobPath); err == nil { + // did this one already + continue + } else if knownFile { + if err := os.Symlink(referencedFile, expectedLayerBlobPath); err != nil { + return nil, err + } + symlinkedFiles = append(symlinkedFiles, referencedFile) + } else if knownBlob { + if err := os.WriteFile(expectedLayerBlobPath, referencedBlob, 0o600); err != nil { + return nil, err + } } } + // write the index that refers to this one artifact image + tag := "latest" + indexFile := filepath.Join(tmp, "index.json") + index := v1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + MediaType: v1.MediaTypeImageIndex, + Manifests: []v1.Descriptor{{ + MediaType: v1.MediaTypeImageManifest, + Digest: artifactManifestDigest, + Size: int64(len(contents)), + Annotations: map[string]string{ + v1.AnnotationRefName: tag, + }, + }}, + } + indexBytes, err := json.Marshal(&index) + if err != nil { + return nil, fmt.Errorf("encoding image index for OCI layout: %w", err) + } + if err := os.WriteFile(indexFile, indexBytes, 0o644); err != nil { + return nil, fmt.Errorf("writing image index for OCI layout: %w", err) + } + // write the layout file + layoutFile := filepath.Join(tmp, "oci-layout") + if err := os.WriteFile(layoutFile, []byte(`{"imageLayoutVersion": "1.0.0"}`), 0o644); err != nil { + return nil, fmt.Errorf("writing oci-layout file: %w", err) + } + // build the reference to this artifact image's oci layout + ref, err := ocilayout.NewReference(tmp, tag) + if err != nil { + return nil, fmt.Errorf("creating ImageReference for artifact with files %q: %w", symlinkedFiles, err) + } + references = append(references, ref) } } for _, instance := range whichInstances { @@ -331,6 +514,8 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag OS, Architecture, OSVersion, Variant string Features, OSFeatures, Annotations []string Size int64 + ConfigInfo types.BlobInfo + ArtifactType string } var instanceInfos []instanceInfo var manifestDigest digest.Digest @@ -361,6 +546,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag Features: append([]string{}, lists.Docker().Manifests[i].Platform.Features...), OSFeatures: append([]string{}, platform.OSFeatures...), Size: instance.Size, + ArtifactType: instance.ArtifactType, } instanceInfos = append(instanceInfos, instanceInfo) } @@ -391,6 +577,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag Features: append([]string{}, lists.Docker().Manifests[i].Platform.Features...), OSFeatures: append([]string{}, platform.OSFeatures...), Size: instance.Size, + ArtifactType: instance.ArtifactType, } instanceInfos = append(instanceInfos, instanceInfo) added = true @@ -406,11 +593,28 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag instanceInfo := instanceInfo{ instanceDigest: nil, } + if primaryManifestType == v1.MediaTypeImageManifest { + if m, err := manifest.OCI1FromManifest(primaryManifestBytes); err == nil { + instanceInfo.ArtifactType = m.ArtifactType + } + } 
instanceInfos = append(instanceInfos, instanceInfo) } + knownConfigTypes := []string{manifest.DockerV2Schema2ConfigMediaType, v1.MediaTypeImageConfig} for _, instanceInfo := range instanceInfos { - if instanceInfo.OS == "" || instanceInfo.Architecture == "" { + manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest) + if err != nil { + return "", fmt.Errorf("reading manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err) + } + instanceManifest, err := manifest.FromBlob(manifestBytes, manifestType) + if err != nil { + return "", fmt.Errorf("parsing manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err) + } + instanceInfo.ConfigInfo = instanceManifest.ConfigInfo() + hasPlatformConfig := instanceInfo.ArtifactType == "" && slices.Contains(knownConfigTypes, instanceInfo.ConfigInfo.MediaType) + needToParsePlatformConfig := (instanceInfo.OS == "" || instanceInfo.Architecture == "") + if hasPlatformConfig && needToParsePlatformConfig { img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, instanceInfo.instanceDigest)) if err != nil { return "", fmt.Errorf("reading configuration blob from %q: %w", transports.ImageName(ref), err) @@ -422,17 +626,15 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag if instanceInfo.OS == "" { instanceInfo.OS = config.OS instanceInfo.OSVersion = config.OSVersion - instanceInfo.OSFeatures = config.OSFeatures + if config.OSFeatures != nil { + instanceInfo.OSFeatures = slices.Clone(config.OSFeatures) + } } if instanceInfo.Architecture == "" { instanceInfo.Architecture = config.Architecture instanceInfo.Variant = config.Variant } } - manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest) - if err != nil { - return "", fmt.Errorf("reading manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err) - } if instanceInfo.instanceDigest == nil { manifestDigest, err = manifest.Digest(manifestBytes) if err != nil { @@ -455,6 +657,281 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag return manifestDigest, nil } +// AddArtifactOptions contains options which control the contents of the +// artifact manifest that AddArtifact will create and add to the image index. + +// This should provide for all of the ways to construct a manifest outlined in +// https://github.com/opencontainers/image-spec/blob/main/manifest.md#guidelines-for-artifact-usage +// * no blobs → set ManifestArtifactType +// * blobs, no configuration → set ManifestArtifactType and possibly LayerMediaType, and provide file names +// * blobs and configuration → set ManifestArtifactType, possibly LayerMediaType, and ConfigDescriptor, and provide file names +// +// The older style of describing artifacts: +// * leave ManifestArtifactType blank +// * specify a zero-length application/vnd.oci.image.config.v1+json config blob +// * set LayerMediaType to a custom type +// +// When reading data produced elsewhere, note that newer tooling will produce +// manifests with ArtifactType set. If the manifest's ArtifactType is not set, +// consumers should consult the config descriptor's MediaType. +type AddArtifactOptions struct { + ManifestArtifactType *string // overall type of the artifact manifest. 
default: "application/vnd.unknown.artifact.v1" + Platform v1.Platform // default: add to the index without platform information + ConfigDescriptor *v1.Descriptor // default: a descriptor for an explicitly empty config blob + ConfigFile string // path to config contents, recorded if ConfigDescriptor.Size != 0 and ConfigDescriptor.Data is not set + LayerMediaType *string // default: mime.TypeByExtension() if basename contains ".", else http.DetectContentType() + Annotations map[string]string // optional, default is none + SubjectReference types.ImageReference // optional + ExcludeTitles bool // don't add "org.opencontainers.image.title" annotations set to file base names +} + +// AddArtifact creates an artifact manifest describing the specified file or +// files, then adds them to the specified image index. Returns the +// instanceDigest for the artifact manifest. +// The caller could craft the manifest themselves and use Add() to add it to +// the image index and get the same end-result, but this should save them some +// work. +func (l *list) AddArtifact(ctx context.Context, sys *types.SystemContext, options AddArtifactOptions, files ...string) (digest.Digest, error) { + // If we were given a subject, build a descriptor for it first, since + // it might be remote, and anything else we do before looking at it + // might have to get thrown away if we can't get to it for whatever + // reason. + var subject *v1.Descriptor + if options.SubjectReference != nil { + subjectReference, err := options.SubjectReference.NewImageSource(ctx, sys) + if err != nil { + return "", fmt.Errorf("setting up to read manifest and configuration from subject %q: %w", transports.ImageName(options.SubjectReference), err) + } + defer subjectReference.Close() + subjectManifestBytes, subjectManifestType, err := subjectReference.GetManifest(ctx, nil) + if err != nil { + return "", fmt.Errorf("reading manifest from subject %q: %w", transports.ImageName(options.SubjectReference), err) + } + subjectManifestDigest, err := manifest.Digest(subjectManifestBytes) + if err != nil { + return "", fmt.Errorf("digesting manifest of subject %q: %w", transports.ImageName(options.SubjectReference), err) + } + var subjectArtifactType string + if !manifest.MIMETypeIsMultiImage(subjectManifestType) { + var subjectManifest v1.Manifest + if json.Unmarshal(subjectManifestBytes, &subjectManifest) == nil { + subjectArtifactType = subjectManifest.ArtifactType + } + } + subject = &v1.Descriptor{ + MediaType: subjectManifestType, + ArtifactType: subjectArtifactType, + Digest: subjectManifestDigest, + Size: int64(len(subjectManifestBytes)), + } + } + + // Build up the layers list piece by piece. + var layers []v1.Descriptor + fileDigests := make(map[string]digest.Digest) + + if len(files) == 0 { + // https://github.com/opencontainers/image-spec/blob/main/manifest.md#guidelines-for-artifact-usage + // says that we should have at least one layer listed, even if it's just a placeholder + layers = append(layers, v1.DescriptorEmptyJSON) + } + for _, file := range files { + if err := func() error { + // Open the file so that we can digest it. + absFile, err := filepath.Abs(file) + if err != nil { + return fmt.Errorf("converting %q to an absolute path: %w", file, err) + } + + f, err := os.Open(absFile) + if err != nil { + return fmt.Errorf("reading %q to determine its digest: %w", file, err) + } + defer f.Close() + + // Hang on to a copy of the first 512 bytes, but digest the whole thing. 
+ digester := digest.Canonical.Digester() + writeCounter := ioutils.NewWriteCounter(digester.Hash()) + var detectableData bytes.Buffer + _, err = io.CopyN(writeCounter, io.TeeReader(f, &detectableData), 512) + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("reading %q to determine its digest: %w", file, err) + } + if err == nil { + if _, err := io.Copy(writeCounter, f); err != nil { + return fmt.Errorf("reading %q to determine its digest: %w", file, err) + } + } + fileDigests[absFile] = digester.Digest() + + // If one wasn't specified, figure out what the MediaType should be. + title := filepath.Base(absFile) + layerMediaType := options.LayerMediaType + if layerMediaType == nil { + if index := strings.LastIndex(title, "."); index != -1 { + // File's basename has an extension, try to use a shortcut. + tmp := mime.TypeByExtension(title[index:]) + if tmp != "" { + layerMediaType = &tmp + } + } + if layerMediaType == nil { + // File's basename has no extension or didn't map to a type, look at the contents we saved. + tmp := http.DetectContentType(detectableData.Bytes()) + layerMediaType = &tmp + } + if layerMediaType != nil { + // Strip off any parameters, since we only want the type name. + if parsedMediaType, _, err := mime.ParseMediaType(*layerMediaType); err == nil { + layerMediaType = &parsedMediaType + } + } + } + + // Build the descriptor for the layer. + descriptor := v1.Descriptor{ + MediaType: *layerMediaType, + Digest: fileDigests[absFile], + Size: writeCounter.Count, + } + // OCI annotations are usually applied at the image manifest as a whole, + // but tools like oras (https://oras.land/) also apply them to blob + // descriptors. AnnotationTitle is used as a suggestion for the name + // to give to a blob if it's being stored as a file, and we default + // to adding one based on its original name. + if !options.ExcludeTitles { + descriptor.Annotations = map[string]string{ + v1.AnnotationTitle: title, + } + } + layers = append(layers, descriptor) + return nil + }(); err != nil { + return "", err + } + } + + // Unless we were told what this is, use the default that ORAS uses. + artifactType := "application/vnd.unknown.artifact.v1" + if options.ManifestArtifactType != nil { + artifactType = *options.ManifestArtifactType + } + + // Unless we were explicitly told otherwise, default to an empty config blob. 
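For orientation, v1.DescriptorEmptyJSON is the image-spec v1.1 descriptor for the canonical two-byte `{}` blob, with a media type dedicated to explicitly empty content. Its fields look roughly like this (values as published in the OCI image-spec; double-check against the vendored copy):

// The image-spec "empty JSON" descriptor, reproduced here for reference.
var descriptorEmptyJSON = v1.Descriptor{
	MediaType: v1.MediaTypeEmptyJSON, // "application/vnd.oci.empty.v1+json"
	Digest:    "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a", // sha256 of "{}"
	Size:      2,
	Data:      []byte(`{}`),
}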
+ configDescriptor := internal.DeepCopyDescriptor(&v1.DescriptorEmptyJSON) + if options.ConfigDescriptor != nil { + configDescriptor = internal.DeepCopyDescriptor(options.ConfigDescriptor) + } + if options.ConfigFile != "" { + if options.ConfigDescriptor == nil { // i.e., we assigned the default mediatype + configDescriptor.MediaType = v1.MediaTypeImageConfig + } + configDescriptor.Data = nil + configDescriptor.Digest = "" // to be figured out below + configDescriptor.Size = -1 // to be figured out below + } + configFilePath := "" + if configDescriptor.Size != 0 { + if len(configDescriptor.Data) == 0 { + if options.ConfigFile == "" { + return "", errors.New("needed config data file, but none was provided") + } + filePath, err := filepath.Abs(options.ConfigFile) + if err != nil { + return "", fmt.Errorf("recording artifact config data file %q: %w", options.ConfigFile, err) + } + digester := digest.Canonical.Digester() + counter := ioutils.NewWriteCounter(digester.Hash()) + if err := func() error { + f, err := os.Open(filePath) + if err != nil { + return fmt.Errorf("reading artifact config data file %q: %w", options.ConfigFile, err) + } + defer f.Close() + if _, err := io.Copy(counter, f); err != nil { + return fmt.Errorf("digesting artifact config data file %q: %w", options.ConfigFile, err) + } + return nil + }(); err != nil { + return "", err + } + configDescriptor.Data = nil + configDescriptor.Size = counter.Count + configDescriptor.Digest = digester.Digest() + configFilePath = filePath + } else { + decoder := bytes.NewReader(configDescriptor.Data) + digester := digest.Canonical.Digester() + counter := ioutils.NewWriteCounter(digester.Hash()) + if _, err := io.Copy(counter, decoder); err != nil { + return "", fmt.Errorf("digesting inlined artifact config data: %w", err) + } + configDescriptor.Size = counter.Count + configDescriptor.Digest = digester.Digest() + } + } else { + configDescriptor.Data = nil + configDescriptor.Digest = digest.Canonical.FromString("") + } + + // Construct the manifest. + artifactManifest := v1.Manifest{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + MediaType: v1.MediaTypeImageManifest, + ArtifactType: artifactType, + Config: *configDescriptor, + Layers: layers, + Subject: subject, + } + // Add in annotations, more or less exactly as specified. + if options.Annotations != nil { + artifactManifest.Annotations = maps.Clone(options.Annotations) + } + + // Encode and save the data we care about. 
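It may help to see where the marshalling below is headed: for one attached file and the default empty config, the encoded artifact manifest comes out roughly as follows (the layer digest, size, and file name are illustrative placeholders, not output of this code):

// Illustrative JSON only; "e30=" is base64 for "{}", the inlined empty config.
const exampleArtifactManifest = `{
  "schemaVersion": 2,
  "mediaType": "application/vnd.oci.image.manifest.v1+json",
  "artifactType": "application/vnd.unknown.artifact.v1",
  "config": {
    "mediaType": "application/vnd.oci.empty.v1+json",
    "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
    "size": 2,
    "data": "e30="
  },
  "layers": [
    {
      "mediaType": "text/plain",
      "digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
      "size": 42,
      "annotations": { "org.opencontainers.image.title": "notes.txt" }
    }
  ]
}`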
+ artifactManifestBytes, err := json.Marshal(artifactManifest) + if err != nil { + return "", fmt.Errorf("marshalling the artifact manifest: %w", err) + } + artifactManifestDigest, err := manifest.Digest(artifactManifestBytes) + if err != nil { + return "", fmt.Errorf("digesting the artifact manifest: %w", err) + } + l.artifacts.Manifests[artifactManifestDigest] = string(artifactManifestBytes) + l.artifacts.Layers[artifactManifestDigest] = nil + l.artifacts.Configs[artifactManifestDigest] = artifactManifest.Config.Digest + if configFilePath != "" { + l.artifacts.Detached[artifactManifest.Config.Digest] = configFilePath + l.artifacts.Files[artifactManifestDigest] = append(l.artifacts.Files[artifactManifestDigest], configFilePath) + } else { + l.artifacts.Blobs[artifactManifest.Config.Digest] = slices.Clone(artifactManifest.Config.Data) + } + for filePath, fileDigest := range fileDigests { + l.artifacts.Layers[artifactManifestDigest] = append(l.artifacts.Layers[artifactManifestDigest], fileDigest) + l.artifacts.Detached[fileDigest] = filePath + l.artifacts.Files[artifactManifestDigest] = append(l.artifacts.Files[artifactManifestDigest], filePath) + } + for _, layer := range layers { + if len(layer.Data) != 0 { + l.artifacts.Blobs[layer.Digest] = slices.Clone(layer.Data) + l.artifacts.Layers[artifactManifestDigest] = append(l.artifacts.Layers[artifactManifestDigest], layer.Digest) + } + } + // Add this artifact manifest to the image index. + if err := l.AddInstance(artifactManifestDigest, int64(len(artifactManifestBytes)), artifactManifest.MediaType, options.Platform.OS, options.Platform.Architecture, options.Platform.OSVersion, options.Platform.OSFeatures, options.Platform.Variant, nil, nil); err != nil { + return "", fmt.Errorf("adding artifact manifest for %q to image index: %w", files, err) + } + // Set the artifact type in the image index entry if we have one, since AddInstance() didn't do that for us. + if artifactManifest.ArtifactType != "" { + if err := l.List.SetArtifactType(&artifactManifestDigest, artifactManifest.ArtifactType); err != nil { + return "", fmt.Errorf("adding artifact manifest for %q to image index: %w", files, err) + } + } + return artifactManifestDigest, nil +} + // Remove filters out any instances in the list which match the specified digest. 
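To tie the AddArtifact pieces above together, a hypothetical caller might look like the sketch below. Create, SaveToImage, and AddArtifactOptions are this package's exported names; the assumption that AddArtifact is reachable through the List returned by Create(), plus the context, store, file path, and artifact type, are all stand-ins.

import (
	"context"

	"github.com/containers/common/libimage/manifests"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func attachExampleArtifact(ctx context.Context, sys *types.SystemContext, store storage.Store) (digest.Digest, error) {
	li := manifests.Create() // start from an empty image index

	artifactType := "application/x-example-artifact" // hypothetical type
	opts := manifests.AddArtifactOptions{ManifestArtifactType: &artifactType}

	// Each named file becomes one layer descriptor in a new artifact manifest.
	instanceDigest, err := li.AddArtifact(ctx, sys, opts, "/tmp/notes.txt")
	if err != nil {
		return "", err
	}
	// Save the index so Reference() can later locate the detached file.
	if _, err := li.SaveToImage(store, "", []string{"localhost/artifacts:latest"}, v1.MediaTypeImageIndex); err != nil {
		return "", err
	}
	return instanceDigest, nil
}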
func (l *list) Remove(instanceDigest digest.Digest) error { err := l.List.Remove(instanceDigest) diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go index 2b340286..b00af66a 100644 --- a/vendor/github.com/containers/common/libimage/normalize.go +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libimage/oci.go b/vendor/github.com/containers/common/libimage/oci.go index fcbd10ad..80aefc97 100644 --- a/vendor/github.com/containers/common/libimage/oci.go +++ b/vendor/github.com/containers/common/libimage/oci.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libimage/platform.go b/vendor/github.com/containers/common/libimage/platform.go index c378bc27..bf8b054b 100644 --- a/vendor/github.com/containers/common/libimage/platform.go +++ b/vendor/github.com/containers/common/libimage/platform.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index bc8e8498..6c8d87c5 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -31,7 +30,7 @@ import ( "github.com/sirupsen/logrus" ) -// PullOptions allows for custommizing image pulls. +// PullOptions allows for customizing image pulls. type PullOptions struct { CopyOptions @@ -54,13 +53,37 @@ type PullOptions struct { // The error is storage.ErrImageUnknown iff the pull policy is set to "never" // and no local image has been found. This allows for an easier integration // into some users of this package (e.g., Buildah). -func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullPolicy, options *PullOptions) ([]*Image, error) { +func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullPolicy, options *PullOptions) (_ []*Image, pullError error) { logrus.Debugf("Pulling image %s (policy: %s)", name, pullPolicy) - + if r.eventChannel != nil { + defer func() { + if pullError != nil { + // Note that we use the input name here to preserve the transport data. + r.writeEvent(&Event{Name: name, Time: time.Now(), Type: EventTypeImagePullError, Error: pullError}) + } + }() + } if options == nil { options = &PullOptions{} } + defaultConfig, err := config.Default() + if err != nil { + return nil, err + } + if options.MaxRetries == nil { + options.MaxRetries = &defaultConfig.Engine.Retry + } + if options.RetryDelay == nil { + if defaultConfig.Engine.RetryDelay != "" { + duration, err := time.ParseDuration(defaultConfig.Engine.RetryDelay) + if err != nil { + return nil, fmt.Errorf("failed to parse containers.conf retry_delay: %w", err) + } + options.RetryDelay = &duration + } + } + var possiblyUnqualifiedName string // used for short-name resolution ref, err := alltransports.ParseImageName(name) if err != nil { @@ -134,28 +157,25 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP options.Variant = r.systemContext.VariantChoice } - var ( - pulledImages []string - pullError error - ) + var pulledImages []string // Dispatch the copy operation. 
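The switch that follows keys on the transport name baked into the parsed reference. A small illustration of which case common inputs land in (transport names as registered by containers/image; the image names are made up):

import (
	"fmt"

	"github.com/containers/image/v5/transports/alltransports"
)

func printTransportNames() {
	for _, name := range []string{
		"docker://quay.io/podman/hello:latest", // registry case below
		"docker-archive:/tmp/hello.tar",        // docker-archive case below
		"oci:/tmp/layout:hello",                // falls through to the default case
	} {
		if ref, err := alltransports.ParseImageName(name); err == nil {
			fmt.Println(ref.Transport().Name()) // docker, docker-archive, oci
		}
	}
}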
switch ref.Transport().Name() { // DOCKER REGISTRY case registryTransport.Transport.Name(): - pulledImages, pullError = r.copyFromRegistry(ctx, ref, possiblyUnqualifiedName, pullPolicy, options) + pulledImages, err = r.copyFromRegistry(ctx, ref, possiblyUnqualifiedName, pullPolicy, options) // DOCKER ARCHIVE case dockerArchiveTransport.Transport.Name(): - pulledImages, pullError = r.copyFromDockerArchive(ctx, ref, &options.CopyOptions) + pulledImages, err = r.copyFromDockerArchive(ctx, ref, &options.CopyOptions) // ALL OTHER TRANSPORTS default: - pulledImages, pullError = r.copyFromDefault(ctx, ref, &options.CopyOptions) + pulledImages, err = r.copyFromDefault(ctx, ref, &options.CopyOptions) } - if pullError != nil { - return nil, pullError + if err != nil { + return nil, err } localImages := []*Image{} @@ -406,7 +426,7 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference for _, tag := range tags { select { // Let's be gentle with Podman remote. case <-ctx.Done(): - return nil, fmt.Errorf("pulling cancelled") + return nil, errors.New("pulling cancelled") default: // We can continue. } @@ -446,7 +466,7 @@ func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemCo } else { d, err := manifest.Digest(manifestBytes) if err != nil { - return nil, fmt.Errorf("digesting manifest") + return nil, errors.New("digesting manifest") } imageDigest = d } @@ -511,7 +531,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str // If the local image is corrupted, we need to repull it. if localImage != nil { - if err := localImage.isCorrupted(imageName); err != nil { + if err := localImage.isCorrupted(ctx, imageName); err != nil { logrus.Error(err) localImage = nil } diff --git a/vendor/github.com/containers/common/libimage/push.go b/vendor/github.com/containers/common/libimage/push.go index ed1d90c1..f89b8fc0 100644 --- a/vendor/github.com/containers/common/libimage/push.go +++ b/vendor/github.com/containers/common/libimage/push.go @@ -1,19 +1,22 @@ //go:build !remote -// +build !remote package libimage import ( "context" + "fmt" "time" + "github.com/containers/common/pkg/config" dockerArchiveTransport "github.com/containers/image/v5/docker/archive" + dockerDaemonTransport "github.com/containers/image/v5/docker/daemon" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/transports/alltransports" "github.com/sirupsen/logrus" ) -// PushOptions allows for custommizing image pushes. +// PushOptions allows for customizing image pushes. type PushOptions struct { CopyOptions } @@ -32,6 +35,23 @@ func (r *Runtime) Push(ctx context.Context, source, destination string, options options = &PushOptions{} } + defaultConfig, err := config.Default() + if err != nil { + return nil, err + } + if options.MaxRetries == nil { + options.MaxRetries = &defaultConfig.Engine.Retry + } + if options.RetryDelay == nil { + if defaultConfig.Engine.RetryDelay != "" { + duration, err := time.ParseDuration(defaultConfig.Engine.RetryDelay) + if err != nil { + return nil, fmt.Errorf("failed to parse containers.conf retry_delay: %w", err) + } + options.RetryDelay = &duration + } + } + // Look up the local image. Note that we need to ignore the platform // and push what the user specified (containers/podman/issues/10344). 
image, resolvedSource, err := r.LookupImage(source, nil) @@ -66,6 +86,14 @@ func (r *Runtime) Push(ctx context.Context, source, destination string, options destRef = dockerRef } + // docker-archive, the docker daemon, and DockerV2Schema2 manifests support only gzip compression + if options.CompressionFormat != nil && + (destRef.Transport().Name() == dockerArchiveTransport.Transport.Name() || + destRef.Transport().Name() == dockerDaemonTransport.Transport.Name() || + options.ManifestMIMEType == manifest.DockerV2Schema2MediaType) { + options.CompressionFormat = nil + } + if r.eventChannel != nil { defer r.writeEvent(&Event{ID: image.ID(), Name: destination, Time: time.Now(), Type: EventTypeImagePush}) } diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 1948fe0a..632f0fcc 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -162,7 +161,7 @@ func (r *Runtime) storageToImage(storageImage *storage.Image, ref types.ImageRef } } -// Exists returns true if the specicifed image exists in the local containers +// Exists returns true if the specified image exists in the local containers // storage. Note that it may return false if an image is corrupted. func (r *Runtime) Exists(name string) (bool, error) { image, _, err := r.LookupImage(name, nil) @@ -172,7 +171,7 @@ func (r *Runtime) Exists(name string) (bool, error) { if image == nil { return false, nil } - if err := image.isCorrupted(name); err != nil { + if err := image.isCorrupted(context.Background(), name); err != nil { logrus.Error(err) return false, nil } @@ -235,8 +234,12 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, if storageRef.Transport().Name() != storageTransport.Transport.Name() { return nil, "", fmt.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name()) } - img, err := storageTransport.Transport.GetStoreImage(r.store, storageRef) + _, img, err := storageTransport.ResolveReference(storageRef) if err != nil { + if errors.Is(err, storageTransport.ErrNoSuchImage) { + // backward compatibility + return nil, "", storage.ErrImageUnknown + } return nil, "", err } logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport()) @@ -347,9 +350,9 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, namedCandida if err != nil { return nil, err } - img, err = storageTransport.Transport.GetStoreImage(r.store, ref) + _, img, err = storageTransport.ResolveReference(ref) if err != nil { - if errors.Is(err, storage.ErrImageUnknown) { + if errors.Is(err, storageTransport.ErrNoSuchImage) { return nil, nil } return nil, err @@ -605,7 +608,7 @@ func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListI // as the layer tree will be computed once for all instead of once for // each individual image (see containers/podman/issues/17828).
- tree, err := r.layerTree(images) + tree, err := r.layerTree(ctx, images) if err != nil { return nil, err } @@ -687,7 +690,7 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem } if options.ExternalContainers && options.IsExternalContainerFunc == nil { - return nil, []error{fmt.Errorf("libimage error: cannot remove external containers without callback")} + return nil, []error{errors.New("libimage error: cannot remove external containers without callback")} } // The logic here may require some explanation. Image removal is diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go index 47a3a566..62cad328 100644 --- a/vendor/github.com/containers/common/libimage/save.go +++ b/vendor/github.com/containers/common/libimage/save.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -54,7 +53,7 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string, return fmt.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format) } if len(options.AdditionalTags) > 0 { - return fmt.Errorf("cannot save multiple images with multiple tags") + return errors.New("cannot save multiple images with multiple tags") } } diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go index 9ef0e832..b26ad80d 100644 --- a/vendor/github.com/containers/common/libimage/search.go +++ b/vendor/github.com/containers/common/libimage/search.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go index 6e4514b9..ffadae0d 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go @@ -1,5 +1,4 @@ -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -18,8 +17,8 @@ import ( internalutil "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" "github.com/containers/common/libnetwork/util" - pkgutil "github.com/containers/common/pkg/util" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) @@ -32,13 +31,13 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str IPAMOptions: map[string]string{}, } - cniJSON := make(map[string]interface{}) + cniJSON := make(map[string]any) err := json.Unmarshal(conf.Bytes, &cniJSON) if err != nil { return nil, fmt.Errorf("failed to unmarshal network config %s: %w", conf.Name, err) } if args, ok := cniJSON["args"]; ok { - if key, ok := args.(map[string]interface{}); ok { + if key, ok := args.(map[string]any); ok { // read network labels and options from the conf file network.Labels = getNetworkArgsFromConfList(key, podmanLabelKey) network.Options = getNetworkArgsFromConfList(key, podmanOptionsKey) @@ -215,9 +214,9 @@ func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath } // getNetworkArgsFromConfList returns the map of args in a conflist, argType should be labels or options -func getNetworkArgsFromConfList(args map[string]interface{}, argType string) map[string]string { +func getNetworkArgsFromConfList(args map[string]any, argType string) map[string]string { if args, ok := args[argType]; ok { - if labels, ok := 
args.(map[string]interface{}); ok { + if labels, ok := args.(map[string]any); ok { result := make(map[string]string, len(labels)) for k, v := range labels { if v, ok := v.(string); ok { @@ -299,7 +298,7 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ // the dnsname plugin also needs to be updated for 1.0.0 // TODO change to 1.0.0 when most distros support it ncList := newNcList(network.Name, "0.4.0", network.Labels, network.Options) - var plugins []interface{} + var plugins []any switch network.Driver { case types.BridgeNetworkDriver: @@ -359,7 +358,7 @@ func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry protocols := strings.Split(port.Protocol, ",") for _, protocol := range protocols { - if !pkgutil.StringInSlice(protocol, []string{"tcp", "udp", "sctp"}) { + if !slices.Contains([]string{"tcp", "udp", "sctp"}, protocol) { return nil, fmt.Errorf("unknown port protocol %s", protocol) } cniPort := cniPortMapEntry{ @@ -421,11 +420,11 @@ func parseOptions(networkOptions map[string]string, networkDriver string) (*opti case types.ModeOption: switch networkDriver { case types.MacVLANNetworkDriver: - if !pkgutil.StringInSlice(v, types.ValidMacVLANModes) { + if !slices.Contains(types.ValidMacVLANModes, v) { return nil, fmt.Errorf("unknown macvlan mode %q", v) } case types.IPVLANNetworkDriver: - if !pkgutil.StringInSlice(v, types.ValidIPVLANModes) { + if !slices.Contains(types.ValidIPVLANModes, v) { return nil, fmt.Errorf("unknown ipvlan mode %q", v) } default: diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go index 79d7ef12..9ccf4eff 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go @@ -16,8 +16,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -26,8 +25,10 @@ import ( "context" "encoding/json" "fmt" + "os" "os/exec" "path/filepath" + "strings" "github.com/containernetworking/cni/pkg/invoke" "github.com/containernetworking/cni/pkg/version" @@ -80,6 +81,16 @@ func (e *cniExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData [ c.Env = append(c.Env, "XDG_RUNTIME_DIR=") } + // The CNI plugins need access to iptables in $PATH. As it turns out debian doesn't put + // /usr/sbin in $PATH for rootless users. This will break rootless networking completely. + // We might break existing users and we cannot expect everyone to change their $PATH so + // let's add /usr/sbin to $PATH ourselves. 
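A quick way to reproduce the problem described above (purely illustrative; run as a rootless user on a Debian-like system where iptables lives in /usr/sbin):

import (
	"fmt"
	"os/exec"
)

func checkIptables() {
	// exec.LookPath honours $PATH just like the CNI plugins do, so without
	// /usr/sbin in $PATH it fails even though /usr/sbin/iptables exists.
	if _, err := exec.LookPath("iptables"); err != nil {
		fmt.Println("iptables not found in $PATH:", err)
	}
}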
+ path := os.Getenv("PATH") + if !strings.Contains(path, "/usr/sbin") { + path += ":/usr/sbin" + c.Env = append(c.Env, "PATH="+path) + } + err := c.Run() if err != nil { return nil, annotatePluginError(err, pluginPath, stdout.Bytes(), stderr.Bytes()) diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go index 1d48d080..711535ce 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go @@ -1,5 +1,4 @@ -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -116,7 +115,7 @@ type dnsNameConfig struct { } // ncList describes a generic map -type ncList map[string]interface{} +type ncList map[string]any // newNcList creates a generic map of values with string // keys and adds in version and network name @@ -139,8 +138,6 @@ func newNcList(name, version string, labels, options map[string]string) ncList { // newHostLocalBridge creates a new LocalBridge for host-local func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu, vlan int, ipamConf *ipamConfig) *hostLocalBridge { - caps := make(map[string]bool) - caps["ips"] = true bridge := hostLocalBridge{ PluginType: "bridge", BrName: name, @@ -154,7 +151,7 @@ func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu, vlan int, ipam bridge.IPAM = *ipamConf // if we use host-local set the ips cap to ensure we can set static ips via runtime config if ipamConf.PluginType == types.HostLocalIPAMDriver { - bridge.Capabilities = caps + bridge.Capabilities = map[string]bool{"ips": true} } } return &bridge @@ -216,13 +213,10 @@ func newIPAMDefaultRoute(isIPv6 bool) (ipamRoute, error) { // newPortMapPlugin creates a predefined, default portmapping // configuration func newPortMapPlugin() portMapConfig { - caps := make(map[string]bool) - caps["portMappings"] = true - p := portMapConfig{ + return portMapConfig{ PluginType: "portmap", - Capabilities: caps, + Capabilities: map[string]bool{"portMappings": true}, } - return p } // newFirewallPlugin creates a generic firewall plugin @@ -246,12 +240,10 @@ func newTuningPlugin() tuningConfig { // newDNSNamePlugin creates the dnsname config with a given // domainname func newDNSNamePlugin(domainName string) dnsNameConfig { - caps := make(map[string]bool, 1) - caps["aliases"] = true return dnsNameConfig{ PluginType: "dnsname", DomainName: domainName, - Capabilities: caps, + Capabilities: map[string]bool{"aliases": true}, } } diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go index a1eeceb7..71b7872f 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/config.go +++ b/vendor/github.com/containers/common/libnetwork/cni/config.go @@ -1,5 +1,4 @@ -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -11,8 +10,8 @@ import ( internalutil "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" - pkgutil "github.com/containers/common/pkg/util" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) func (n *cniNetwork) NetworkUpdate(_ string, _ types.NetworkUpdateOptions) error { @@ -206,7 +205,7 @@ func createIPMACVLAN(network *types.Network) error { if err != nil { return err } - if !pkgutil.StringInSlice(network.NetworkInterface, interfaceNames) { + if !slices.Contains(interfaceNames, 
network.NetworkInterface) { return fmt.Errorf("parent interface %s does not exist", network.NetworkInterface) } } diff --git a/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go b/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go index ff95c0e1..ddee6d2e 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go +++ b/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go @@ -1,5 +1,4 @@ -//go:build freebsd -// +build freebsd +//go:build (linux || freebsd) && cni package cni diff --git a/vendor/github.com/containers/common/libnetwork/cni/config_linux.go b/vendor/github.com/containers/common/libnetwork/cni/config_linux.go index 836fd73b..efc92061 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/config_linux.go +++ b/vendor/github.com/containers/common/libnetwork/cni/config_linux.go @@ -1,5 +1,4 @@ -//go:build linux -// +build linux +//go:build (linux || freebsd) && cni package cni diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go index 49d20b91..06b78f67 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/network.go +++ b/vendor/github.com/containers/common/libnetwork/cni/network.go @@ -1,5 +1,4 @@ -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -16,6 +15,7 @@ import ( "time" "github.com/containernetworking/cni/libcni" + "github.com/containers/common/libnetwork/internal/rootlessnetns" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/version" @@ -53,6 +53,9 @@ type cniNetwork struct { // networks is a map with loaded networks, the key is the network name networks map[string]*network + + // rootlessNetns is used for the rootless network setup/teardown + rootlessNetns *rootlessnetns.Netns } type network struct { @@ -65,21 +68,14 @@ type network struct { type InitConfig struct { // CNIConfigDir is the directory where the cni config files are stored. CNIConfigDir string - // CNIPluginDirs is a list of directories where cni should look for the plugins. - CNIPluginDirs []string // RunDir is a directory where temporary files can be stored. RunDir string - // DefaultNetwork is the name for the default network. - DefaultNetwork string - // DefaultSubnet is the default subnet for the default network. - DefaultSubnet string - - // DefaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create - DefaultsubnetPools []config.SubnetPool - // IsMachine describes whether podman runs in a podman machine environment. IsMachine bool + + // Config containers.conf options + Config *config.Config } // NewCNINetworkInterface creates the ContainerNetwork interface for the CNI backend.
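With this refactor a caller stops passing individual network settings and hands the whole containers.conf to the backend instead. A hedged construction sketch follows; config.Default() is the real entry point in containers/common/pkg/config, while the directory paths and the assumption that this vendored cni package is imported directly are illustrative only.

import (
	"github.com/containers/common/libnetwork/cni"
	"github.com/containers/common/libnetwork/types"
	"github.com/containers/common/pkg/config"
)

func newCNIBackend() (types.ContainerNetwork, error) {
	conf, err := config.Default()
	if err != nil {
		return nil, err
	}
	return cni.NewCNINetworkInterface(&cni.InitConfig{
		CNIConfigDir: "/etc/cni/net.d",          // illustrative
		RunDir:       "/run/containers/network", // illustrative
		IsMachine:    false,
		Config:       conf, // default network, subnets, and plugin dirs now come from here
	})
}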
@@ -96,12 +92,12 @@ func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { return nil, err } - defaultNetworkName := conf.DefaultNetwork + defaultNetworkName := conf.Config.Network.DefaultNetwork if defaultNetworkName == "" { defaultNetworkName = types.DefaultNetworkName } - defaultSubnet := conf.DefaultSubnet + defaultSubnet := conf.Config.Network.DefaultSubnet if defaultSubnet == "" { defaultSubnet = types.DefaultSubnet } @@ -110,21 +106,30 @@ func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { return nil, fmt.Errorf("failed to parse default subnet: %w", err) } - defaultSubnetPools := conf.DefaultsubnetPools + defaultSubnetPools := conf.Config.Network.DefaultSubnetPools if defaultSubnetPools == nil { defaultSubnetPools = config.DefaultSubnetPools } - cni := libcni.NewCNIConfig(conf.CNIPluginDirs, &cniExec{}) + var netns *rootlessnetns.Netns + if unshare.IsRootless() { + netns, err = rootlessnetns.New(conf.RunDir, rootlessnetns.CNI, conf.Config) + if err != nil { + return nil, err + } + } + + cni := libcni.NewCNIConfig(conf.Config.Network.CNIPluginDirs.Values, &cniExec{}) n := &cniNetwork{ cniConfigDir: conf.CNIConfigDir, - cniPluginDirs: conf.CNIPluginDirs, + cniPluginDirs: conf.Config.Network.CNIPluginDirs.Get(), cniConf: cni, defaultNetwork: defaultNetworkName, defaultSubnet: defaultNet, defaultsubnetPools: defaultSubnetPools, isMachine: conf.IsMachine, lock: lock, + rootlessNetns: netns, } return n, nil diff --git a/vendor/github.com/containers/common/libnetwork/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go index 2da8da1a..d8fb4775 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/run.go +++ b/vendor/github.com/containers/common/libnetwork/cni/run.go @@ -1,5 +1,4 @@ -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -39,61 +38,72 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma return nil, fmt.Errorf("failed to set the loopback adapter up: %w", err) } - var retErr error - teardownOpts := options - teardownOpts.Networks = map[string]types.PerNetworkOptions{} - // make sure to teardown the already connected networks on error - defer func() { - if retErr != nil { - if len(teardownOpts.Networks) > 0 { - err := n.teardown(namespacePath, types.TeardownOptions(teardownOpts)) - if err != nil { - logrus.Warn(err) + results := make(map[string]types.StatusBlock, len(options.Networks)) + + setup := func() error { + var retErr error + teardownOpts := options + teardownOpts.Networks = map[string]types.PerNetworkOptions{} + // make sure to teardown the already connected networks on error + defer func() { + if retErr != nil { + if len(teardownOpts.Networks) > 0 { + err := n.teardown(namespacePath, types.TeardownOptions(teardownOpts)) + if err != nil { + logrus.Warn(err) + } } } + }() + + ports, err := convertSpecgenPortsToCNIPorts(options.PortMappings) + if err != nil { + return err } - }() - ports, err := convertSpecgenPortsToCNIPorts(options.PortMappings) - if err != nil { - return nil, err - } + for name, netOpts := range options.Networks { + netOpts := netOpts + network := n.networks[name] + rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) - results := make(map[string]types.StatusBlock, len(options.Networks)) - for name, netOpts := range options.Networks { - netOpts := netOpts - network := n.networks[name] - rt := getRuntimeConfig(namespacePath, 
options.ContainerName, options.ContainerID, name, ports, &netOpts) - - // If we have more than one static ip we need parse the ips via runtime config, - // make sure to add the ips capability to the first plugin otherwise it doesn't get the ips - if len(netOpts.StaticIPs) > 0 && !network.cniNet.Plugins[0].Network.Capabilities["ips"] { - caps := make(map[string]interface{}) - caps["capabilities"] = map[string]bool{"ips": true} - network.cniNet.Plugins[0], retErr = libcni.InjectConf(network.cniNet.Plugins[0], caps) + // If we have more than one static ip we need to parse the ips via runtime config, + // make sure to add the ips capability to the first plugin otherwise it doesn't get the ips + if len(netOpts.StaticIPs) > 0 && !network.cniNet.Plugins[0].Network.Capabilities["ips"] { + caps := map[string]any{ + "capabilities": map[string]bool{"ips": true}, + } + network.cniNet.Plugins[0], retErr = libcni.InjectConf(network.cniNet.Plugins[0], caps) if retErr != nil { - return nil, retErr + return retErr + } + } + + var res cnitypes.Result + res, retErr = n.cniConf.AddNetworkList(context.Background(), network.cniNet, rt) + // Add this network to teardown opts since it is now connected. + // Also add this if an error was returned since we want to call teardown on this regardless. + teardownOpts.Networks[name] = netOpts + if retErr != nil { + return retErr } - } - var res cnitypes.Result - res, retErr = n.cniConf.AddNetworkList(context.Background(), network.cniNet, rt) - // Add this network to teardown opts since it is now connected. - // Also add this if an errors was returned since we want to call teardown on this regardless. - teardownOpts.Networks[name] = netOpts - if retErr != nil { - return nil, retErr + logrus.Debugf("cni result for container %s network %s: %v", options.ContainerID, name, res) + var status types.StatusBlock + status, retErr = CNIResultToStatus(res) + if retErr != nil { + return retErr + } + results[name] = status } + return nil + } - logrus.Debugf("cni result for container %s network %s: %v", options.ContainerID, name, res) - var status types.StatusBlock - status, retErr = CNIResultToStatus(res) - if retErr != nil { - return nil, retErr - } - results[name] = status + if n.rootlessNetns != nil { + err = n.rootlessNetns.Setup(len(options.Networks), setup) + } else { + err = setup() } - return results, nil + return results, err } // CNIResultToStatus converts the cni result to status block @@ -164,7 +174,7 @@ func getRuntimeConfig(netns, conName, conID, networkName string, ports []cniPort // Only K8S_POD_NAME is used by dnsname to get the container name.
{"K8S_POD_NAME", conName}, }, - CapabilityArgs: map[string]interface{}{}, + CapabilityArgs: map[string]any{}, } // Propagate environment CNI_ARGS @@ -225,28 +235,39 @@ func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOption } var multiErr *multierror.Error - for name, netOpts := range options.Networks { - netOpts := netOpts - rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) - - cniConfList, newRt, err := getCachedNetworkConfig(n.cniConf, name, rt) - if err == nil { - rt = newRt - } else { - logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name) - network := n.networks[name] - if network == nil { - multiErr = multierror.Append(multiErr, fmt.Errorf("network %s: %w", name, types.ErrNoSuchNetwork)) - continue + teardown := func() error { + for name, netOpts := range options.Networks { + netOpts := netOpts + rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) + + cniConfList, newRt, err := getCachedNetworkConfig(n.cniConf, name, rt) + if err == nil { + rt = newRt + } else { + logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name) + network := n.networks[name] + if network == nil { + multiErr = multierror.Append(multiErr, fmt.Errorf("network %s: %w", name, types.ErrNoSuchNetwork)) + continue + } + cniConfList = network.cniNet } - cniConfList = network.cniNet - } - err = n.cniConf.DelNetworkList(context.Background(), cniConfList, rt) - if err != nil { - multiErr = multierror.Append(multiErr, err) + err = n.cniConf.DelNetworkList(context.Background(), cniConfList, rt) + if err != nil { + multiErr = multierror.Append(multiErr, err) + } } + return nil + } + + if n.rootlessNetns != nil { + err = n.rootlessNetns.Teardown(len(options.Networks), teardown) + } else { + err = teardown() } + multiErr = multierror.Append(multiErr, err) + return multiErr.ErrorOrNil() } @@ -267,3 +288,10 @@ func getCachedNetworkConfig(cniConf *libcni.CNIConfig, name string, rt *libcni.R } return cniConfList, rt, nil } + +func (n *cniNetwork) RunInRootlessNetns(toRun func() error) error { + if n.rootlessNetns == nil { + return types.ErrNotRootlessNetns + } + return n.rootlessNetns.Run(n.lock, toRun) +} diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go index ed65921c..6bdb34e6 100644 --- a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go +++ b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/util" + "golang.org/x/exp/slices" ) const ( @@ -220,7 +220,7 @@ func checkIfEntryExists(current HostEntry, entries HostEntries) bool { if current.IP == rm.IP { // it is enough if one of the names match, in this case we remove the full entry for _, name := range current.Names { - if util.StringInSlice(name, rm.Names) { + if slices.Contains(rm.Names, name) { return true } } diff --git a/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns.go b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns.go new file mode 100644 index 00000000..edc29f66 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns.go @@ -0,0 +1,8 @@ +package rootlessnetns + +type NetworkBackend int + +const 
( + Netavark NetworkBackend = iota + CNI +) diff --git a/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_freebsd.go b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_freebsd.go new file mode 100644 index 00000000..a176d2d8 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_freebsd.go @@ -0,0 +1,28 @@ +package rootlessnetns + +import ( + "errors" + + "github.com/containers/common/pkg/config" + "github.com/containers/storage/pkg/lockfile" +) + +var ErrNotSupported = errors.New("rootless netns only supported on linux") + +type Netns struct{} + +func New(dir string, backend NetworkBackend, conf *config.Config) (*Netns, error) { + return nil, ErrNotSupported +} + +func (n *Netns) Setup(nets int, toRun func() error) error { + return ErrNotSupported +} + +func (n *Netns) Teardown(nets int, toRun func() error) error { + return ErrNotSupported +} + +func (n *Netns) Run(lock *lockfile.LockFile, toRun func() error) error { + return ErrNotSupported +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go new file mode 100644 index 00000000..1531ee52 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go @@ -0,0 +1,602 @@ +package rootlessnetns + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/common/libnetwork/pasta" + "github.com/containers/common/libnetwork/resolvconf" + "github.com/containers/common/libnetwork/slirp4netns" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/netns" + "github.com/containers/common/pkg/systemd" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/lockfile" + "github.com/hashicorp/go-multierror" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const ( + // rootlessNetnsDir is the directory name + rootlessNetnsDir = "rootless-netns" + // refCountFile file name for the ref count file + refCountFile = "ref-count" + + // rootlessNetNsConnPidFile is the name of the rootless netns slirp4netns/pasta pid file + rootlessNetNsConnPidFile = "rootless-netns-conn.pid" + + // persistentCNIDir is the directory where the CNI files are stored + persistentCNIDir = "/var/lib/cni" + + tmpfs = "tmpfs" + none = "none" + resolvConfName = "resolv.conf" +) + +type Netns struct { + // dir used for the rootless netns + dir string + // backend used for the network setup/teardown + backend NetworkBackend + + // config contains containers.conf options. + config *config.Config +} + +type rootlessNetnsError struct { + msg string + err error +} + +func (e *rootlessNetnsError) Error() string { + msg := e.msg + ": " + return fmt.Sprintf("rootless netns: %s%v", msg, e.err) +} + +func (e *rootlessNetnsError) Unwrap() error { + return e.err +} + +// wrapError wraps the error with extra context +// It will always include "rootless netns:" so the msg should not mention it again, +// msg can be empty to just include the rootless netns part. +// err must be non nil. 
+func wrapError(msg string, err error) *rootlessNetnsError { + return &rootlessNetnsError{ + msg: msg, + err: err, + } +} + +func New(dir string, backend NetworkBackend, conf *config.Config) (*Netns, error) { + netnsDir := filepath.Join(dir, rootlessNetnsDir) + if err := os.MkdirAll(netnsDir, 0o700); err != nil { + return nil, wrapError("", err) + } + return &Netns{ + dir: netnsDir, + backend: backend, + config: conf, + }, nil +} + +// getPath is a small wrapper around filepath.Join() to have a bit less code +func (n *Netns) getPath(path string) string { + return filepath.Join(n.dir, path) +} + +// getOrCreateNetns returns the rootless netns; if it created a new one, the +// returned bool is set to true. +func (n *Netns) getOrCreateNetns() (ns.NetNS, bool, error) { + nsPath := n.getPath(rootlessNetnsDir) + nsRef, err := ns.GetNS(nsPath) + if err == nil { + // TODO check if slirp4netns is alive + return nsRef, false, nil + } + logrus.Debugf("Creating rootless network namespace at %q", nsPath) + // We have to create the netns dir again here because it is possible + // that cleanup() removed it. + if err := os.MkdirAll(n.dir, 0o700); err != nil { + return nil, false, wrapError("", err) + } + netns, err := netns.NewNSAtPath(nsPath) + if err != nil { + return nil, false, wrapError("create netns", err) + } + switch strings.ToLower(n.config.Network.DefaultRootlessNetworkCmd) { + case "", slirp4netns.BinaryName: + err = n.setupSlirp4netns(nsPath) + case pasta.BinaryName: + err = n.setupPasta(nsPath) + default: + err = fmt.Errorf("invalid rootless network command %q", n.config.Network.DefaultRootlessNetworkCmd) + } + return netns, true, err +} + +func (n *Netns) cleanup() error { + if _, err := os.Stat(n.dir); err != nil { + if errors.Is(err, fs.ErrNotExist) { + // dir does not exist, no need for cleanup + return nil + } + return err + } + + logrus.Debug("Cleaning up rootless network namespace") + + nsPath := n.getPath(rootlessNetnsDir) + var multiErr *multierror.Error + if err := netns.UnmountNS(nsPath); err != nil { + multiErr = multierror.Append(multiErr, err) + } + if err := n.cleanupRootlessNetns(); err != nil { + multiErr = multierror.Append(multiErr, wrapError("kill network process", err)) + } + if err := os.RemoveAll(n.dir); err != nil { + multiErr = multierror.Append(multiErr, wrapError("remove rootless netns dir", err)) + } + + return multiErr.ErrorOrNil() +} + +func (n *Netns) setupPasta(nsPath string) error { + pidPath := n.getPath(rootlessNetNsConnPidFile) + + pastaOpts := pasta.SetupOptions{ + Config: n.config, + Netns: nsPath, + ExtraOptions: []string{"--pid", pidPath}, + } + if err := pasta.Setup(&pastaOpts); err != nil { + return fmt.Errorf("setting up Pasta: %w", err) + } + + if systemd.RunsOnSystemd() { + // Treat these as fatal - if pasta failed to write a PID file something is probably wrong.
+ pidfile, err := os.ReadFile(pidPath) + if err != nil { + return fmt.Errorf("unable to open pasta PID file: %w", err) + } + pid, err := strconv.Atoi(strings.TrimSpace(string(pidfile))) + if err != nil { + return fmt.Errorf("unable to decode pasta PID: %w", err) + } + + if err := systemd.MoveRootlessNetnsSlirpProcessToUserSlice(pid); err != nil { + // only log this, it is not fatal but can lead to issues when running podman inside systemd units + logrus.Errorf("failed to move the rootless netns pasta process to the systemd user.slice: %v", err) + } + } + + if err := resolvconf.New(&resolvconf.Params{ + Path: n.getPath(resolvConfName), + // fake the netns since we want to filter localhost + Namespaces: []specs.LinuxNamespace{ + {Type: specs.NetworkNamespace}, + }, + // TODO: Need a way to determine if there is a valid v6 address on any + // external interface of the system. + IPv6Enabled: false, + KeepHostServers: true, + Nameservers: []string{}, + }); err != nil { + return wrapError("create resolv.conf", err) + } + + return nil +} + +func (n *Netns) setupSlirp4netns(nsPath string) error { + res, err := slirp4netns.Setup(&slirp4netns.SetupOptions{ + Config: n.config, + ContainerID: "rootless-netns", + Netns: nsPath, + }) + if err != nil { + return wrapError("start slirp4netns", err) + } + // create pid file for the slirp4netns process + // this is needed to kill the process in the cleanup + pid := strconv.Itoa(res.Pid) + err = os.WriteFile(n.getPath(rootlessNetNsConnPidFile), []byte(pid), 0o600) + if err != nil { + return wrapError("write slirp4netns pid file", err) + } + + if systemd.RunsOnSystemd() { + // move to systemd scope to prevent systemd from killing it + err = systemd.MoveRootlessNetnsSlirpProcessToUserSlice(res.Pid) + if err != nil { + // only log this, it is not fatal but can lead to issues when running podman inside systemd units + logrus.Errorf("failed to move the rootless netns slirp4netns process to the systemd user.slice: %v", err) + } + } + + // build a new resolv.conf file which uses the slirp4netns dns server address + resolveIP, err := slirp4netns.GetDNS(res.Subnet) + if err != nil { + return wrapError("determine default slirp4netns DNS address", err) + } + + if err := resolvconf.New(&resolvconf.Params{ + Path: n.getPath(resolvConfName), + // fake the netns since we want to filter localhost + Namespaces: []specs.LinuxNamespace{ + {Type: specs.NetworkNamespace}, + }, + IPv6Enabled: res.IPv6, + KeepHostServers: true, + Nameservers: []string{resolveIP.String()}, + }); err != nil { + return wrapError("create resolv.conf", err) + } + return nil +} + +func (n *Netns) cleanupRootlessNetns() error { + pidFile := n.getPath(rootlessNetNsConnPidFile) + b, err := os.ReadFile(pidFile) + if err == nil { + var i int + i, err = strconv.Atoi(strings.TrimSpace(string(b))) + if err == nil { + // kill the slirp process so we do not leak it + err = unix.Kill(i, unix.SIGTERM) + if err == unix.ESRCH { + err = nil + } + } + } + return err +} + +// mountAndMkdirDest is a convenience wrapper for mount and mkdir +func mountAndMkdirDest(source string, target string, fstype string, flags uintptr) error { + if err := os.MkdirAll(target, 0o700); err != nil { + return wrapError("create mount point", err) + } + if err := unix.Mount(source, target, fstype, flags, ""); err != nil { + return wrapError(fmt.Sprintf("mount %q to %q", source, target), err) + } + return nil +} + +func (n *Netns) setupMounts() error { + // Before we can run the given function, + // we have to set up all mounts correctly.
+func (n *Netns) setupMounts() error {
+	// Before we can run the given function,
+	// we have to set up all mounts correctly.
+
+	// The order of the mounts is IMPORTANT.
+	// The idea of the extra mount ns is to make /run and /var/lib/cni writable
+	// for the cni plugins without affecting the podman user namespace.
+	// Because the plugins also need access to XDG_RUNTIME_DIR/netns some special setup is needed.
+
+	// The following bind mounts are needed
+	// 1. XDG_RUNTIME_DIR -> XDG_RUNTIME_DIR/rootless-netns/XDG_RUNTIME_DIR
+	// 2. /run/systemd -> XDG_RUNTIME_DIR/rootless-netns/run/systemd (only if it exists)
+	// 3. XDG_RUNTIME_DIR/rootless-netns/resolv.conf -> /etc/resolv.conf or XDG_RUNTIME_DIR/rootless-netns/run/symlink/target
+	// 4. XDG_RUNTIME_DIR/rootless-netns/var/lib/cni -> /var/lib/cni (if /var/lib/cni does not exist, use the parent dir)
+	// 5. XDG_RUNTIME_DIR/rootless-netns/run -> /run
+
+	// Create a new mount namespace,
+	// this must happen inside the netns thread.
+	err := unix.Unshare(unix.CLONE_NEWNS)
+	if err != nil {
+		return wrapError("create new mount namespace", err)
+	}
+
+	xdgRuntimeDir, err := homedir.GetRuntimeDir()
+	if err != nil {
+		return fmt.Errorf("could not get runtime directory: %w", err)
+	}
+	newXDGRuntimeDir := n.getPath(xdgRuntimeDir)
+	// 1. Mount the netns into the new run to keep them accessible.
+	// Otherwise cni setup will fail because it cannot access the netns files.
+	err = mountAndMkdirDest(xdgRuntimeDir, newXDGRuntimeDir, none, unix.MS_BIND|unix.MS_SHARED|unix.MS_REC)
+	if err != nil {
+		return err
+	}
+
+	// 2. Also keep /run/systemd if it exists.
+	// Many files are symlinked into this dir, for example /dev/log.
+	runSystemd := "/run/systemd"
+	_, err = os.Stat(runSystemd)
+	if err == nil {
+		newRunSystemd := n.getPath(runSystemd)
+		err = mountAndMkdirDest(runSystemd, newRunSystemd, none, unix.MS_BIND|unix.MS_REC)
+		if err != nil {
+			return err
+		}
+	}
+
+	// 3. On some distros /etc/resolv.conf is symlinked to somewhere under /run.
+	// Because the kernel will follow the symlink before mounting, it is not
+	// possible to mount a file at /etc/resolv.conf. We have to ensure that
+	// the link target will be available in the mount ns.
+	// see: https://github.com/containers/podman/issues/10855
+	resolvePath := resolvconf.DefaultResolvConf
+	linkCount := 0
+	for i := 1; i < len(resolvePath); i++ {
+		// Do not use filepath.EvalSymlinks, we only want the first symlink under /run.
+		// If /etc/resolv.conf has more than one symlink under /run, e.g.
+		// -> /run/systemd/resolve/stub-resolv.conf -> /run/systemd/resolve/resolv.conf
+		// we would put the netns resolv.conf file at the last path. However this will
+		// break DNS because the second link does not exist in the mount ns.
+		// see https://github.com/containers/podman/issues/11222
+		//
+		// We also need to resolve all path components, not just the last file.
+		// see https://github.com/containers/podman/issues/12461
+
+		if resolvePath[i] != '/' {
+			// if we are at the last char we need to increment i by one because there is no final slash
+			if i == len(resolvePath)-1 {
+				i++
+			} else {
+				// not the end of path, keep going
+				continue
+			}
+		}
+		path := resolvePath[:i]
+
+		fi, err := os.Lstat(path)
+		if err != nil {
+			return fmt.Errorf("failed to stat resolv.conf path: %w", err)
+		}
+
+		// no link, just continue
+		if fi.Mode()&os.ModeSymlink == 0 {
+			continue
+		}
+
+		link, err := os.Readlink(path)
+		if err != nil {
+			return fmt.Errorf("failed to read resolv.conf symlink: %w", err)
+		}
+		linkCount++
+		if filepath.IsAbs(link) {
+			// link is an absolute path
+			resolvePath = filepath.Join(link, resolvePath[i:])
+		} else {
+			// link is relative, join it with the previous path
+			base := filepath.Dir(path)
+			resolvePath = filepath.Join(base, link, resolvePath[i:])
+		}
+		// set i back to zero since we now have a new base path
+		i = 0
+
+		// we have to stop at the first path under /run because we will have an empty /run and will create the path anyway;
+		// if we continued we would need to recreate all links under /run
+		if strings.HasPrefix(resolvePath, "/run/") {
+			break
+		}
+		// make sure we do not loop forever
+		if linkCount == 255 {
+			return errors.New("too many symlinks while resolving /etc/resolv.conf")
+		}
+	}
+	logrus.Debugf("The path of /etc/resolv.conf in the mount ns is %q", resolvePath)
+	// When /etc/resolv.conf on the host is a symlink to /run/systemd/resolve/stub-resolv.conf,
+	// we have to mount an empty filesystem on /run/systemd/resolve in the child namespace,
+	// so as to isolate the directory from the host mount namespace.
+	//
+	// Otherwise our bind-mount for /run/systemd/resolve/stub-resolv.conf is unmounted
+	// when systemd-resolved unlinks and recreates /run/systemd/resolve/stub-resolv.conf on the host.
+	// see: https://github.com/containers/podman/issues/10929
+	if strings.HasPrefix(resolvePath, "/run/systemd/resolve/") {
+		rsr := n.getPath("/run/systemd/resolve")
+		err = mountAndMkdirDest("", rsr, tmpfs, unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV)
+		if err != nil {
+			return err
+		}
+	}
+	if strings.HasPrefix(resolvePath, "/run/") {
+		resolvePath = n.getPath(resolvePath)
+		err = os.MkdirAll(filepath.Dir(resolvePath), 0o700)
+		if err != nil {
+			return wrapError("create resolv.conf directory", err)
+		}
+		// we want to bind mount on this file so we have to create the file first
+		_, err = os.OpenFile(resolvePath, os.O_CREATE|os.O_RDONLY, 0o600)
+		if err != nil {
+			return wrapError("create resolv.conf file", err)
+		}
+	}
+	// mount resolv.conf to make use of the host DNS
+	err = unix.Mount(n.getPath(resolvConfName), resolvePath, none, unix.MS_BIND, "")
+	if err != nil {
+		return wrapError(fmt.Sprintf("mount resolv.conf to %q", resolvePath), err)
+	}
+
+	// 4. CNI plugins need access to /var/lib/cni
+	if n.backend == CNI {
+		if err := n.mountCNIVarDir(); err != nil {
+			return err
+		}
+	}
+
+	// 5. Mount the new prepared run dir to /run, it has to be recursive to keep the other bind mounts.
+	runDir := n.getPath("run")
+	// relabel the new run directory to the iptables /run label;
+	// this is important, otherwise the iptables command will fail
+	err = label.Relabel(runDir, "system_u:object_r:iptables_var_run_t:s0", false)
+	if err != nil {
+		if !errors.Is(err, unix.ENOTSUP) {
+			return wrapError("relabel iptables_var_run_t", err)
+		}
+		logrus.Debugf("Labeling not supported on %q", runDir)
+	}
+	err = mountAndMkdirDest(runDir, "/run", none, unix.MS_BIND|unix.MS_REC)
+	if err != nil {
+		return err
+	}
+	return nil
+}
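The symlink walk in setupMounts() above is subtle enough to deserve a standalone illustration. A minimal sketch, simplified to follow only whole-path symlinks (the real loop also resolves intermediate path components), of why stopping at the first hop under /run differs from filepath.EvalSymlinks, which would follow the chain to a final target that may not exist in the fresh, empty /run of the new mount namespace:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	path := "/etc/resolv.conf"
	for i := 0; i < 255; i++ { // bounded, like the linkCount guard above
		fi, err := os.Lstat(path)
		if err != nil || fi.Mode()&os.ModeSymlink == 0 {
			break // not a symlink (or gone): mount directly over path
		}
		link, err := os.Readlink(path)
		if err != nil {
			break
		}
		if !filepath.IsAbs(link) {
			link = filepath.Join(filepath.Dir(path), link)
		}
		path = link
		if strings.HasPrefix(path, "/run/") {
			break // first hop under /run is the bind-mount target
		}
	}
	fmt.Println("bind-mount target:", path)
}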
+func (n *Netns) mountCNIVarDir() error {
+	varDir := ""
+	varTarget := persistentCNIDir
+	// We can only mount to a target dir which exists, so check /var/lib/cni recursively.
+	// While we could always use /var, there are cases where a user might store the cni
+	// configs under /var/custom and this would break.
+	for {
+		if _, err := os.Stat(varTarget); err == nil {
+			varDir = n.getPath(varTarget)
+			break
+		}
+		varTarget = filepath.Dir(varTarget)
+		if varTarget == "/" {
+			break
+		}
+	}
+	if varDir == "" {
+		return errors.New("failed to stat /var directory")
+	}
+	if err := os.MkdirAll(varDir, 0o700); err != nil {
+		return wrapError("create var dir", err)
+	}
+	// make sure to mount var first
+	err := unix.Mount(varDir, varTarget, none, unix.MS_BIND, "")
+	if err != nil {
+		return wrapError(fmt.Sprintf("mount %q to %q", varDir, varTarget), err)
+	}
+	return nil
+}
+
+func (n *Netns) runInner(toRun func() error) (err error) {
+	nsRef, newNs, err := n.getOrCreateNetns()
+	if err != nil {
+		return err
+	}
+	defer nsRef.Close()
+	// If a new netns was created make sure to clean it up again on an error to not leak it.
+	if newNs {
+		defer func() {
+			if err != nil {
+				if err := n.cleanup(); err != nil {
+					logrus.Errorf("Rootless netns cleanup error after failed setup: %v", err)
+				}
+			}
+		}()
+	}
+
+	return nsRef.Do(func(_ ns.NetNS) error {
+		if err := n.setupMounts(); err != nil {
+			return err
+		}
+		return toRun()
+	})
+}
+ runDir := n.getPath("run") + // relabel the new run directory to the iptables /run label + // this is important, otherwise the iptables command will fail + err = label.Relabel(runDir, "system_u:object_r:iptables_var_run_t:s0", false) + if err != nil { + if !errors.Is(err, unix.ENOTSUP) { + return wrapError("relabel iptables_var_run_t", err) + } + logrus.Debugf("Labeling not supported on %q", runDir) + } + err = mountAndMkdirDest(runDir, "/run", none, unix.MS_BIND|unix.MS_REC) + if err != nil { + return err + } + return nil +} + +func (n *Netns) mountCNIVarDir() error { + varDir := "" + varTarget := persistentCNIDir + // we can only mount to a target dir which exists, check /var/lib/cni recursively + // while we could always use /var there are cases where a user might store the cni + // configs under /var/custom and this would break + for { + if _, err := os.Stat(varTarget); err == nil { + varDir = n.getPath(varTarget) + break + } + varTarget = filepath.Dir(varTarget) + if varTarget == "/" { + break + } + } + if varDir == "" { + return errors.New("failed to stat /var directory") + } + if err := os.MkdirAll(varDir, 0o700); err != nil { + return wrapError("create var dir", err) + } + // make sure to mount var first + err := unix.Mount(varDir, varTarget, none, unix.MS_BIND, "") + if err != nil { + return wrapError(fmt.Sprintf("mount %q to %q", varDir, varTarget), err) + } + return nil +} + +func (n *Netns) runInner(toRun func() error) (err error) { + nsRef, newNs, err := n.getOrCreateNetns() + if err != nil { + return err + } + defer nsRef.Close() + // If a new netns was created make sure to clean it up again on an error to not leak it. + if newNs { + defer func() { + if err != nil { + if err := n.cleanup(); err != nil { + logrus.Errorf("Rootless netns cleanup error after failed setup: %v", err) + } + } + }() + } + + return nsRef.Do(func(_ ns.NetNS) error { + if err := n.setupMounts(); err != nil { + return err + } + return toRun() + }) +} + +func (n *Netns) Setup(nets int, toRun func() error) error { + err := n.runInner(toRun) + if err != nil { + return err + } + _, err = refCount(n.dir, nets) + return err +} + +func (n *Netns) Teardown(nets int, toRun func() error) error { + var multiErr *multierror.Error + count, countErr := refCount(n.dir, -nets) + if countErr != nil { + multiErr = multierror.Append(multiErr, countErr) + } + err := n.runInner(toRun) + if err != nil { + multiErr = multierror.Append(multiErr, err) + } + + // only cleanup if the ref count did not throw an error + if count == 0 && countErr == nil { + err = n.cleanup() + if err != nil { + multiErr = multierror.Append(multiErr, wrapError("cleanup", err)) + } + } + + return multiErr.ErrorOrNil() +} + +// Run any long running function in the userns. +// We need to ensure that during setup/cleanup we are locked to avoid races. +// However because the given function could be running a long time we must +// unlock in between, i.e. this is used by podman unshare --rootless-nets +// and we do not want to keep it locked for the lifetime of the given command. 
+func (n *Netns) Run(lock *lockfile.LockFile, toRun func() error) error { + lock.Lock() + defer lock.Unlock() + _, err := refCount(n.dir, 1) + if err != nil { + return err + } + inner := func() error { + lock.Unlock() + err = toRun() + lock.Lock() + return err + } + + inErr := n.runInner(inner) + // make sure to always reset the ref counter afterwards + count, err := refCount(n.dir, -1) + if err != nil { + if inErr == nil { + return err + } + logrus.Errorf("Failed to decrement ref count: %v", err) + return inErr + } + if count == 0 { + err = n.cleanup() + if err != nil { + err = wrapError("cleanup", err) + if inErr == nil { + return err + } + logrus.Errorf("Failed to cleanup rootless netns: %v", err) + return inErr + } + } + + return inErr +} + +func refCount(dir string, inc int) (int, error) { + file := filepath.Join(dir, refCountFile) + content, err := os.ReadFile(file) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return -1, wrapError("read ref counter", err) + } + + currentCount := 0 + if len(content) > 0 { + currentCount, err = strconv.Atoi(string(content)) + if err != nil { + return -1, wrapError("parse ref counter", err) + } + } + + currentCount += inc + if currentCount < 0 { + logrus.Errorf("rootless netns ref counter out of sync, counter is at %d, resetting it back to 0", currentCount) + currentCount = 0 + } + + newNum := strconv.Itoa(currentCount) + if err = os.WriteFile(file, []byte(newNum), 0o600); err != nil { + return -1, wrapError("write ref counter", err) + } + + return currentCount, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go index 7197a23b..c511a2df 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go @@ -7,13 +7,13 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/libnetwork/util" "github.com/containers/common/pkg/config" - pkgutil "github.com/containers/common/pkg/util" + "golang.org/x/exp/slices" ) func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) error { if network.NetworkInterface != "" { bridges := GetBridgeInterfaceNames(n) - if pkgutil.StringInSlice(network.NetworkInterface, bridges) { + if slices.Contains(bridges, network.NetworkInterface) { return fmt.Errorf("bridge name %s already in use", network.NetworkInterface) } if !types.NameRegex.MatchString(network.NetworkInterface) { diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/interface.go b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go index 650fcb19..9b66e66a 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/interface.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go @@ -7,7 +7,7 @@ import "github.com/containers/common/libnetwork/types" // NetUtil is a helper interface which all network interfaces should implement to allow easy code sharing type NetUtil interface { - // ForEach eaxecutes the given function for each network + // ForEach executes the given function for each network ForEach(func(types.Network)) // Len returns the number of networks Len() int diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/util.go b/vendor/github.com/containers/common/libnetwork/internal/util/util.go index 2ab24c56..8405bffd 100644 --- 
a/vendor/github.com/containers/common/libnetwork/internal/util/util.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/util.go @@ -7,8 +7,8 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/util" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // GetBridgeInterfaceNames returns all bridge interface names @@ -51,7 +51,7 @@ func GetFreeDeviceName(n NetUtil) (string, error) { // Start by 1, 0 is reserved for the default network for i := 1; i < 1000000; i++ { deviceName := fmt.Sprintf("%s%d", n.DefaultInterfaceName(), i) - if !util.StringInSlice(deviceName, names) { + if !slices.Contains(names, deviceName) { logrus.Debugf("found free device name %s", deviceName) return deviceName, nil } diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go index adf61555..55995440 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go @@ -93,15 +93,15 @@ func ValidateRoutes(routes []types.Route) error { func ValidateRoute(route types.Route) error { if route.Destination.IP == nil { - return fmt.Errorf("route destination ip nil") + return errors.New("route destination ip nil") } if route.Destination.Mask == nil { - return fmt.Errorf("route destination mask nil") + return errors.New("route destination mask nil") } if route.Gateway == nil { - return fmt.Errorf("route gateway nil") + return errors.New("route gateway nil") } // Reparse to ensure destination is valid. @@ -112,7 +112,7 @@ func ValidateRoute(route types.Route) error { // check that destination is a network and not an address if !ip.Equal(ipNet.IP) { - return fmt.Errorf("route destination invalid") + return errors.New("route destination invalid") } return nil diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go index de7af957..bcd1eaea 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/config.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark @@ -16,14 +15,14 @@ import ( internalutil "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" - "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/stringid" + "golang.org/x/exp/slices" ) func sliceRemoveDuplicates(strList []string) []string { list := make([]string, 0, len(strList)) for _, item := range strList { - if !util.StringInSlice(item, list) { + if !slices.Contains(list, item) { list = append(list, item) } } @@ -71,7 +70,7 @@ func (n *netavarkNetwork) NetworkUpdate(name string, options types.NetworkUpdate networkDNSServersBefore := network.NetworkDNSServers networkDNSServersAfter := []string{} for _, server := range networkDNSServersBefore { - if util.StringInSlice(server, options.RemoveDNSServers) { + if slices.Contains(options.RemoveDNSServers, server) { continue } networkDNSServersAfter = append(networkDNSServersAfter, server) @@ -273,7 +272,7 @@ func createIpvlanOrMacvlan(network *types.Network) error { if err != nil { return err } - if !util.StringInSlice(network.NetworkInterface, interfaceNames) { + if !slices.Contains(interfaceNames, network.NetworkInterface) { return 
fmt.Errorf("parent interface %s does not exist", network.NetworkInterface) } } @@ -310,7 +309,7 @@ func createIpvlanOrMacvlan(network *types.Network) error { return errIpvlanNoDHCP } if len(network.Subnets) > 0 { - return fmt.Errorf("ipam driver dhcp set but subnets are set") + return errors.New("ipam driver dhcp set but subnets are set") } } @@ -319,11 +318,11 @@ func createIpvlanOrMacvlan(network *types.Network) error { switch key { case types.ModeOption: if isMacVlan { - if !util.StringInSlice(value, types.ValidMacVLANModes) { + if !slices.Contains(types.ValidMacVLANModes, value) { return fmt.Errorf("unknown macvlan mode %q", value) } } else { - if !util.StringInSlice(value, types.ValidIPVLANModes) { + if !slices.Contains(types.ValidIPVLANModes, value) { return fmt.Errorf("unknown ipvlan mode %q", value) } } @@ -473,7 +472,7 @@ func getAllPlugins(dirs []string) []string { if err == nil { for _, entry := range entries { name := entry.Name() - if !util.StringInSlice(name, plugins) { + if !slices.Contains(plugins, name) { plugins = append(plugins, name) } } diff --git a/vendor/github.com/containers/common/libnetwork/netavark/const.go b/vendor/github.com/containers/common/libnetwork/netavark/const.go index 70c50b1a..fd975a1f 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/const.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/const.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go index 20934a3f..2a52a470 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/exec.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark @@ -10,6 +9,7 @@ import ( "os" "os/exec" "strconv" + "strings" "github.com/sirupsen/logrus" ) @@ -76,9 +76,18 @@ func getRustLogEnv() string { // used to marshal the netavark output into it. This can be nil. // All errors return by this function should be of the type netavarkError // to provide a helpful error message. -func (n *netavarkNetwork) execNetavark(args []string, needPlugin bool, stdin, result interface{}) error { +func (n *netavarkNetwork) execNetavark(args []string, needPlugin bool, stdin, result any) error { // set the netavark log level to the same as the podman env := append(os.Environ(), getRustLogEnv()) + // Netavark need access to iptables in $PATH. As it turns out debian doesn't put + // /usr/sbin in $PATH for rootless users. This will break rootless networking completely. + // We might break existing users and we cannot expect everyone to change their $PATH so + // let's add /usr/sbin to $PATH ourselves. 
+ path := os.Getenv("PATH") + if !strings.Contains(path, "/usr/sbin") { + path += ":/usr/sbin" + env = append(env, "PATH="+path) + } // if we run with debug log level lets also set RUST_BACKTRACE=1 so we can get the full stack trace in case of panics if logrus.IsLevelEnabled(logrus.DebugLevel) { env = append(env, "RUST_BACKTRACE=1") @@ -86,14 +95,17 @@ func (n *netavarkNetwork) execNetavark(args []string, needPlugin bool, stdin, re if n.dnsBindPort != 0 { env = append(env, "NETAVARK_DNS_PORT="+strconv.Itoa(int(n.dnsBindPort))) } + if n.firewallDriver != "" { + env = append(env, "NETAVARK_FW="+n.firewallDriver) + } return n.execBinary(n.netavarkBinary, append(n.getCommonNetavarkOptions(needPlugin), args...), stdin, result, env) } -func (n *netavarkNetwork) execPlugin(path string, args []string, stdin, result interface{}) error { +func (n *netavarkNetwork) execPlugin(path string, args []string, stdin, result any) error { return n.execBinary(path, args, stdin, result, nil) } -func (n *netavarkNetwork) execBinary(path string, args []string, stdin, result interface{}, env []string) error { +func (n *netavarkNetwork) execBinary(path string, args []string, stdin, result any, env []string) error { stdinR, stdinW, err := os.Pipe() if err != nil { return newNetavarkError("failed to create stdin pipe", err) diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go index 65f21c1e..b9a48d45 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark @@ -48,7 +47,7 @@ func (e *ipamError) Error() string { return msg } -func newIPAMError(cause error, msg string, args ...interface{}) *ipamError { +func newIPAMError(cause error, msg string, args ...any) *ipamError { return &ipamError{ msg: fmt.Sprintf(msg, args...), cause: cause, diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go index 0d323db2..9f4ee313 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/network.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark @@ -12,6 +11,7 @@ import ( "strings" "time" + "github.com/containers/common/libnetwork/internal/rootlessnetns" "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" @@ -36,6 +36,9 @@ type netavarkNetwork struct { // aardvarkBinary is the path to the aardvark binary. aardvarkBinary string + // firewallDriver sets the firewall driver to use + firewallDriver string + // defaultNetwork is the name for the default network. defaultNetwork string // defaultSubnet is the default subnet for the default network. @@ -65,6 +68,9 @@ type netavarkNetwork struct { // networks is a map with loaded networks, the key is the network name networks map[string]*types.Network + + // rootlessNetns is used for the rootless network setup/teardown + rootlessNetns *rootlessnetns.Netns } type InitConfig struct { @@ -79,23 +85,12 @@ type InitConfig struct { // NetworkRunDir is where temporary files are stored, i.e.the ipam db, aardvark config NetworkRunDir string - // DefaultNetwork is the name for the default network. 
- DefaultNetwork string - // DefaultSubnet is the default subnet for the default network. - DefaultSubnet string - - // DefaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create - DefaultsubnetPools []config.SubnetPool - - // DNSBindPort is set the port to pass to netavark for aardvark - DNSBindPort uint16 - - // PluginDirs list of directories were netavark plugins are located - PluginDirs []string - // Syslog describes whenever the netavark debug output should be log to the syslog as well. // This will use logrus to do so, make sure logrus is set up to log to the syslog. Syslog bool + + // Config containers.conf options + Config *config.Config } // NewNetworkInterface creates the ContainerNetwork interface for the netavark backend. @@ -112,12 +107,12 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { return nil, err } - defaultNetworkName := conf.DefaultNetwork + defaultNetworkName := conf.Config.Network.DefaultNetwork if defaultNetworkName == "" { defaultNetworkName = types.DefaultNetworkName } - defaultSubnet := conf.DefaultSubnet + defaultSubnet := conf.Config.Network.DefaultSubnet if defaultSubnet == "" { defaultSubnet = types.DefaultSubnet } @@ -134,11 +129,19 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { return nil, err } - defaultSubnetPools := conf.DefaultsubnetPools + defaultSubnetPools := conf.Config.Network.DefaultSubnetPools if defaultSubnetPools == nil { defaultSubnetPools = config.DefaultSubnetPools } + var netns *rootlessnetns.Netns + if unshare.IsRootless() { + netns, err = rootlessnetns.New(conf.NetworkRunDir, rootlessnetns.Netavark, conf.Config) + if err != nil { + return nil, err + } + } + n := &netavarkNetwork{ networkConfigDir: conf.NetworkConfigDir, networkRunDir: conf.NetworkRunDir, @@ -146,13 +149,15 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { aardvarkBinary: conf.AardvarkBinary, networkRootless: unshare.IsRootless(), ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"), + firewallDriver: conf.Config.Network.FirewallDriver, defaultNetwork: defaultNetworkName, defaultSubnet: defaultNet, defaultsubnetPools: defaultSubnetPools, - dnsBindPort: conf.DNSBindPort, - pluginDirs: conf.PluginDirs, + dnsBindPort: conf.Config.Network.DNSBindPort, + pluginDirs: conf.Config.Network.NetavarkPluginDirs.Get(), lock: lock, syslog: conf.Syslog, + rootlessNetns: netns, } return n, nil diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go index 3df5ced0..d13e51ff 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/run.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark @@ -11,8 +10,8 @@ import ( "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" - pkgutil "github.com/containers/common/pkg/util" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) type netavarkOptions struct { @@ -72,12 +71,24 @@ func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions } result := map[string]types.StatusBlock{} - err = n.execNetavark([]string{"setup", namespacePath}, needPlugin, netavarkOpts, &result) - if err != nil { - // lets dealloc ips to prevent leaking - if err := n.deallocIPs(&options.NetworkOptions); err != nil { - logrus.Error(err) + setup := func() error { + 
err := n.execNetavark([]string{"setup", namespacePath}, needPlugin, netavarkOpts, &result) + if err != nil { + // lets dealloc ips to prevent leaking + if err := n.deallocIPs(&options.NetworkOptions); err != nil { + logrus.Error(err) + } + return err } + return nil + } + + if n.rootlessNetns != nil { + err = n.rootlessNetns.Setup(len(options.Networks), setup) + } else { + err = setup() + } + if err != nil { return nil, err } @@ -112,7 +123,16 @@ func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownO return fmt.Errorf("failed to convert net opts: %w", err) } - retErr := n.execNetavark([]string{"teardown", namespacePath}, needPlugin, netavarkOpts, nil) + var retErr error + teardown := func() error { + return n.execNetavark([]string{"teardown", namespacePath}, needPlugin, netavarkOpts, nil) + } + + if n.rootlessNetns != nil { + retErr = n.rootlessNetns.Teardown(len(options.Networks), teardown) + } else { + retErr = teardown() + } // when netavark returned an error we still free the used ips // otherwise we could end up in a state where block the ips forever @@ -154,9 +174,16 @@ func (n *netavarkNetwork) convertNetOpts(opts types.NetworkOptions) (*netavarkOp return nil, false, err } netavarkOptions.Networks[network] = net - if !pkgutil.StringInSlice(net.Driver, builtinDrivers) { + if !slices.Contains(builtinDrivers, net.Driver) { needsPlugin = true } } return &netavarkOptions, needsPlugin, nil } + +func (n *netavarkNetwork) RunInRootlessNetns(toRun func() error) error { + if n.rootlessNetns == nil { + return types.ErrNotRootlessNetns + } + return n.rootlessNetns.Run(n.lock, toRun) +} diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go index aeac8d9c..51c1ae71 100644 --- a/vendor/github.com/containers/common/libnetwork/network/interface.go +++ b/vendor/github.com/containers/common/libnetwork/network/interface.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package network @@ -9,13 +8,10 @@ import ( "os" "path/filepath" - "github.com/containers/common/libnetwork/cni" "github.com/containers/common/libnetwork/netavark" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/machine" "github.com/containers/storage" - "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" @@ -24,8 +20,6 @@ import ( const ( // defaultNetworkBackendFileName is the file name for sentinel file to store the backend defaultNetworkBackendFileName = "defaultNetworkBackend" - // cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins - cniConfigDirRootless = "cni/net.d/" // netavarkBinary is the name of the netavark binary netavarkBinary = "netavark" @@ -53,153 +47,94 @@ func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (type } } - switch backend { - case types.Netavark: - netavarkBin, err := conf.FindHelperBinary(netavarkBinary, false) - if err != nil { - return "", nil, err - } + return backendFromType(backend, store, conf, syslog) +} - aardvarkBin, _ := conf.FindHelperBinary(aardvarkBinary, false) +func netavarkBackendFromConf(store storage.Store, conf *config.Config, syslog bool) (types.ContainerNetwork, error) { + netavarkBin, err := conf.FindHelperBinary(netavarkBinary, false) + if err != nil { + return nil, err + } - confDir := conf.Network.NetworkConfigDir 
- if confDir == "" { - confDir = getDefaultNetavarkConfigDir(store) - } + aardvarkBin, _ := conf.FindHelperBinary(aardvarkBinary, false) - // We cannot use the runroot for rootful since the network namespace is shared for all - // libpod instances they also have to share the same ipam db. - // For rootless we have our own network namespace per libpod instances, - // so this is not a problem there. - runDir := netavarkRunDir - if unshare.IsRootless() { - runDir = filepath.Join(store.RunRoot(), "networks") - } + confDir := conf.Network.NetworkConfigDir + if confDir == "" { + confDir = getDefaultNetavarkConfigDir(store) + } - netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{ - NetworkConfigDir: confDir, - NetworkRunDir: runDir, - NetavarkBinary: netavarkBin, - AardvarkBinary: aardvarkBin, - PluginDirs: conf.Network.NetavarkPluginDirs.Get(), - DefaultNetwork: conf.Network.DefaultNetwork, - DefaultSubnet: conf.Network.DefaultSubnet, - DefaultsubnetPools: conf.Network.DefaultSubnetPools, - DNSBindPort: conf.Network.DNSBindPort, - Syslog: syslog, - }) - return types.Netavark, netInt, err - case types.CNI: - netInt, err := getCniInterface(conf) - return types.CNI, netInt, err - - default: - return "", nil, fmt.Errorf("unsupported network backend %q, check network_backend in containers.conf", backend) + // We cannot use the runroot for rootful since the network namespace is shared for all + // libpod instances they also have to share the same ipam db. + // For rootless we have our own network namespace per libpod instances, + // so this is not a problem there. + runDir := netavarkRunDir + if unshare.IsRootless() { + runDir = filepath.Join(store.RunRoot(), "networks") } + + netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{ + Config: conf, + NetworkConfigDir: confDir, + NetworkRunDir: runDir, + NetavarkBinary: netavarkBin, + AardvarkBinary: aardvarkBin, + Syslog: syslog, + }) + return netInt, err } func defaultNetworkBackend(store storage.Store, conf *config.Config) (backend types.NetworkBackend, err error) { - // read defaultNetworkBackend file + err = nil + file := filepath.Join(store.GraphRoot(), defaultNetworkBackendFileName) - b, err := os.ReadFile(file) - if err == nil { - val := string(b) - if val == string(types.Netavark) { - return types.Netavark, nil - } - if val == string(types.CNI) { - return types.CNI, nil - } - return "", fmt.Errorf("unknown network backend value %q in %q", val, file) - } - // fail for all errors except ENOENT - if !errors.Is(err, os.ErrNotExist) { - return "", fmt.Errorf("could not read network backend value: %w", err) - } - // cache the network backend to make sure always the same one will be used - defer func() { + writeBackendToFile := func(backendT types.NetworkBackend) { // only write when there is no error if err == nil { - if err := ioutils.AtomicWriteFile(file, []byte(backend), 0o644); err != nil { + if err := ioutils.AtomicWriteFile(file, []byte(backendT), 0o644); err != nil { logrus.Errorf("could not write network backend to file: %v", err) } } - }() - - _, err = conf.FindHelperBinary("netavark", false) - if err != nil { - // if we cannot find netavark use CNI - return types.CNI, nil - } - - // If there are any containers then return CNI - cons, err := store.Containers() - if err != nil { - return "", err - } - if len(cons) != 0 { - return types.CNI, nil - } - - // If there are any non ReadOnly images then return CNI - imgs, err := store.Images() - if err != nil { - return "", err - } - for _, i := range imgs { - if !i.ReadOnly 
{ - return types.CNI, nil - } } - // If there are CNI Networks then return CNI - cniInterface, err := getCniInterface(conf) + // read defaultNetworkBackend file + b, err := os.ReadFile(file) if err == nil { - nets, err := cniInterface.NetworkList() - // there is always a default network so check > 1 - if err != nil && !errors.Is(err, os.ErrNotExist) { - return "", err - } - - if len(nets) > 1 { - // we do not have a fresh system so use CNI - return types.CNI, nil - } - } - return types.Netavark, nil -} + val := string(b) -func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) { - confDir := conf.Network.NetworkConfigDir - if confDir == "" { - var err error - confDir, err = getDefaultCNIConfigDir() - if err != nil { - return nil, err + // if the network backend has been already set previously, + // handle the values depending on whether CNI is supported and + // whether the network backend is explicitly configured + if val == string(types.Netavark) { + // netavark is always good + return types.Netavark, nil + } else if val == string(types.CNI) { + if cniSupported { + return types.CNI, nil + } + // the user has *not* configured a network + // backend explicitly but used CNI in the past + // => we upgrade them in this case to netavark only + writeBackendToFile(types.Netavark) + logrus.Info("Migrating network backend to netavark as no backend has been configured previously") + return types.Netavark, nil } + return "", fmt.Errorf("unknown network backend value %q in %q", val, file) } - return cni.NewCNINetworkInterface(&cni.InitConfig{ - CNIConfigDir: confDir, - CNIPluginDirs: conf.Network.CNIPluginDirs.Get(), - RunDir: conf.Engine.TmpDir, - DefaultNetwork: conf.Network.DefaultNetwork, - DefaultSubnet: conf.Network.DefaultSubnet, - DefaultsubnetPools: conf.Network.DefaultSubnetPools, - IsMachine: machine.IsGvProxyBased(), - }) -} -func getDefaultCNIConfigDir() (string, error) { - if !unshare.IsRootless() { - return cniConfigDir, nil + // fail for all errors except ENOENT + if !errors.Is(err, os.ErrNotExist) { + return "", fmt.Errorf("could not read network backend value: %w", err) } - configHome, err := homedir.GetConfigHome() + backend, err = networkBackendFromStore(store, conf) if err != nil { return "", err } - return filepath.Join(configHome, cniConfigDirRootless), nil + // cache the network backend to make sure always the same one will be used + writeBackendToFile(backend) + + return backend, nil } // getDefaultNetavarkConfigDir return the netavark config dir. 
For rootful it will diff --git a/vendor/github.com/containers/common/libnetwork/network/interface_cni.go b/vendor/github.com/containers/common/libnetwork/network/interface_cni.go new file mode 100644 index 00000000..0a8bc447 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/network/interface_cni.go @@ -0,0 +1,121 @@ +//go:build (linux || freebsd) && cni +// +build linux freebsd +// +build cni + +package network + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/containers/common/libnetwork/cni" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/machine" + "github.com/containers/storage" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/unshare" +) + +const ( + // cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins + cniConfigDirRootless = "cni/net.d/" + + cniSupported = true +) + +func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) { + confDir := conf.Network.NetworkConfigDir + if confDir == "" { + var err error + confDir, err = getDefaultCNIConfigDir() + if err != nil { + return nil, err + } + } + return cni.NewCNINetworkInterface(&cni.InitConfig{ + Config: conf, + CNIConfigDir: confDir, + RunDir: conf.Engine.TmpDir, + IsMachine: machine.IsGvProxyBased(), + }) +} + +func getDefaultCNIConfigDir() (string, error) { + if !unshare.IsRootless() { + return cniConfigDir, nil + } + + configHome, err := homedir.GetConfigHome() + if err != nil { + return "", err + } + + return filepath.Join(configHome, cniConfigDirRootless), nil +} + +func networkBackendFromStore(store storage.Store, conf *config.Config) (backend types.NetworkBackend, err error) { + _, err = conf.FindHelperBinary("netavark", false) + if err != nil { + // if we cannot find netavark use CNI + return types.CNI, nil + } + + // If there are any containers then return CNI + cons, err := store.Containers() + if err != nil { + return "", err + } + if len(cons) != 0 { + return types.CNI, nil + } + + // If there are any non ReadOnly images then return CNI + imgs, err := store.Images() + if err != nil { + return "", err + } + for _, i := range imgs { + if !i.ReadOnly { + return types.CNI, nil + } + } + + // If there are CNI Networks then return CNI + cniInterface, err := getCniInterface(conf) + if err == nil { + nets, err := cniInterface.NetworkList() + // there is always a default network so check > 1 + if err != nil && !errors.Is(err, os.ErrNotExist) { + return "", err + } + + if len(nets) > 1 { + // we do not have a fresh system so use CNI + return types.CNI, nil + } + } + return types.Netavark, nil +} + +func backendFromType(backend types.NetworkBackend, store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) { + switch backend { + case types.Netavark: + netInt, err := netavarkBackendFromConf(store, conf, syslog) + if err != nil { + return "", nil, err + } + return types.Netavark, netInt, err + case types.CNI: + netInt, err := getCniInterface(conf) + if err != nil { + return "", nil, err + } + return types.CNI, netInt, err + + default: + return "", nil, fmt.Errorf("unsupported network backend %q, check network_backend in containers.conf", backend) + } +} diff --git a/vendor/github.com/containers/common/libnetwork/network/interface_cni_unsupported.go b/vendor/github.com/containers/common/libnetwork/network/interface_cni_unsupported.go new file mode 100644 index 00000000..2f4bb837 --- /dev/null +++ 
b/vendor/github.com/containers/common/libnetwork/network/interface_cni_unsupported.go
@@ -0,0 +1,32 @@
+//go:build (linux || freebsd) && !cni
+// +build linux freebsd
+// +build !cni
+
+package network
+
+import (
+	"fmt"
+
+	"github.com/containers/common/libnetwork/types"
+	"github.com/containers/common/pkg/config"
+	"github.com/containers/storage"
+)
+
+const (
+	cniSupported = false
+)
+
+func networkBackendFromStore(_store storage.Store, _conf *config.Config) (backend types.NetworkBackend, err error) {
+	return types.Netavark, nil
+}
+
+func backendFromType(backend types.NetworkBackend, store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) {
+	if backend != types.Netavark {
+		return "", nil, fmt.Errorf("cni support is not enabled in this build, only netavark. Got unsupported network backend %q", backend)
+	}
+	cn, err := netavarkBackendFromConf(store, conf, syslog)
+	if err != nil {
+		return "", nil, err
+	}
+	return types.Netavark, cn, err
+}
diff --git a/vendor/github.com/containers/common/libnetwork/pasta/pasta.go b/vendor/github.com/containers/common/libnetwork/pasta/pasta.go
index b787a781..0da7607f 100644
--- a/vendor/github.com/containers/common/libnetwork/pasta/pasta.go
+++ b/vendor/github.com/containers/common/libnetwork/pasta/pasta.go
@@ -47,6 +47,7 @@ func Setup(opts *SetupOptions) error {
 	NoTCPNamespacePorts := true
 	NoUDPNamespacePorts := true
 	NoMapGW := true
+	NoDNS := true
 
 	path, err := opts.Config.FindHelperBinary(BinaryName, true)
 	if err != nil {
@@ -62,7 +63,7 @@ func Setup(opts *SetupOptions) error {
 
 		var addr string
 		if i.HostIP != "" {
-			addr = fmt.Sprintf("%s/", i.HostIP)
+			addr = i.HostIP + "/"
 		}
 
 		switch protocol {
@@ -102,6 +103,8 @@ func Setup(opts *SetupOptions) error {
 			NoMapGW = false
 			// not an actual pasta(1) option
 			cmdArgs = append(cmdArgs[:i], cmdArgs[i+1:]...)
+		case "-D", "--dns", "--dns-forward":
+			NoDNS = false
 		}
 	}
 
@@ -120,21 +123,36 @@ func Setup(opts *SetupOptions) error {
 	if NoMapGW {
 		cmdArgs = append(cmdArgs, "--no-map-gw")
 	}
+	if NoDNS {
+		// disable pasta reading from /etc/resolv.conf which hides the
+		// "Couldn't get any nameserver address" warning when only
+		// localhost resolvers are configured.
+		cmdArgs = append(cmdArgs, "--dns", "none")
+	}
 
-	cmdArgs = append(cmdArgs, "--netns", opts.Netns)
+	// always pass --quiet to silence the info output from pasta
+	cmdArgs = append(cmdArgs, "--quiet", "--netns", opts.Netns)
 
 	logrus.Debugf("pasta arguments: %s", strings.Join(cmdArgs, " "))
 
 	// pasta forks once ready, and quits once we delete the target namespace
-	_, err = exec.Command(path, cmdArgs...).Output()
+	out, err := exec.Command(path, cmdArgs...).CombinedOutput()
 	if err != nil {
 		exitErr := &exec.ExitError{}
 		if errors.As(err, &exitErr) {
 			return fmt.Errorf("pasta failed with exit code %d:\n%s",
-				exitErr.ExitCode(), exitErr.Stderr)
+				exitErr.ExitCode(), string(out))
 		}
 		return fmt.Errorf("failed to start pasta: %w", err)
 	}
 
+	if len(out) > 0 {
+		// TODO: This should be a warning but right now pasta still prints
+		// things with --quiet that we do not care about.
+		// For now info is fine and we can bump it up later, it is only a
+		// nice-to-have.
+ logrus.Infof("pasta logged warnings: %q", string(out)) + } + return nil } diff --git a/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go index c451d3b4..472fb945 100644 --- a/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go +++ b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go @@ -7,9 +7,9 @@ import ( "path/filepath" "strings" - "github.com/containers/common/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) const ( @@ -111,7 +111,7 @@ func getDefaultResolvConf(params *Params) ([]byte, bool, error) { // unsetSearchDomainsIfNeeded removes the search domain when they contain a single dot as element. func unsetSearchDomainsIfNeeded(searches []string) []string { - if util.StringInSlice(".", searches) { + if slices.Contains(searches, ".") { return nil } return searches @@ -173,7 +173,7 @@ func Remove(path string, nameservers []string) error { oldNameservers := getNameservers(contents) newNameserver := make([]string, 0, len(oldNameservers)) for _, ns := range oldNameservers { - if !util.StringInSlice(ns, nameservers) { + if !slices.Contains(nameservers, ns) { newNameserver = append(newNameserver, ns) } } diff --git a/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go b/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go index 43ca9780..d3789341 100644 --- a/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go +++ b/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package slirp4netns @@ -211,7 +210,7 @@ func createBasicSlirpCmdArgs(options *networkOptions, features *slirpFeatures) ( cmdArgs = append(cmdArgs, "--disable-host-loopback") } if options.mtu > -1 && features.HasMTU { - cmdArgs = append(cmdArgs, fmt.Sprintf("--mtu=%d", options.mtu)) + cmdArgs = append(cmdArgs, "--mtu="+strconv.Itoa(options.mtu)) } if !options.noPivotRoot && features.HasEnableSandbox { cmdArgs = append(cmdArgs, "--enable-sandbox") @@ -222,33 +221,33 @@ func createBasicSlirpCmdArgs(options *networkOptions, features *slirpFeatures) ( if options.cidr != "" { if !features.HasCIDR { - return nil, fmt.Errorf("cidr not supported") + return nil, errors.New("cidr not supported") } - cmdArgs = append(cmdArgs, fmt.Sprintf("--cidr=%s", options.cidr)) + cmdArgs = append(cmdArgs, "--cidr="+options.cidr) } if options.enableIPv6 { if !features.HasIPv6 { - return nil, fmt.Errorf("enable_ipv6 not supported") + return nil, errors.New("enable_ipv6 not supported") } cmdArgs = append(cmdArgs, "--enable-ipv6") } if options.outboundAddr != "" { if !features.HasOutboundAddr { - return nil, fmt.Errorf("outbound_addr not supported") + return nil, errors.New("outbound_addr not supported") } - cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr=%s", options.outboundAddr)) + cmdArgs = append(cmdArgs, "--outbound-addr="+options.outboundAddr) } if options.outboundAddr6 != "" { if !features.HasOutboundAddr || !features.HasIPv6 { - return nil, fmt.Errorf("outbound_addr6 not supported") + return nil, errors.New("outbound_addr6 not supported") } if !options.enableIPv6 { - return nil, fmt.Errorf("enable_ipv6=true is required for outbound_addr6") + return nil, errors.New("enable_ipv6=true is required for outbound_addr6") } - cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr6=%s", options.outboundAddr6)) + cmdArgs = 
append(cmdArgs, "--outbound-addr6="+options.outboundAddr6) } return cmdArgs, nil @@ -301,7 +300,7 @@ func Setup(opts *SetupOptions) (*SetupResult, error) { var apiSocket string if havePortMapping && netOptions.isSlirpHostForward { - apiSocket = filepath.Join(opts.Config.Engine.TmpDir, fmt.Sprintf("%s.net", opts.ContainerID)) + apiSocket = filepath.Join(opts.Config.Engine.TmpDir, opts.ContainerID+".net") cmdArgs = append(cmdArgs, "--api-socket", apiSocket) } @@ -611,7 +610,7 @@ func SetupRootlessPortMappingViaRLK(opts *SetupOptions, slirpSubnet *net.IPNet, if stdoutStr != "" { // err contains full debug log and too verbose, so return stdoutStr logrus.Debug(err) - return fmt.Errorf("rootlessport " + strings.TrimSuffix(stdoutStr, "\n")) + return errors.New("rootlessport " + strings.TrimSuffix(stdoutStr, "\n")) } return err } @@ -706,7 +705,7 @@ func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport ui } // if there is no 'error' key in the received JSON data, then the operation was // successful. - var y map[string]interface{} + var y map[string]any if err := json.Unmarshal(buf[0:readLength], &y); err != nil { return fmt.Errorf("parsing error status from slirp4netns: %w", err) } diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go index 6e91ccda..193377b1 100644 --- a/vendor/github.com/containers/common/libnetwork/types/define.go +++ b/vendor/github.com/containers/common/libnetwork/types/define.go @@ -18,6 +18,9 @@ var ( // exists. ErrNetworkExists = errors.New("network already exists") + // ErrNotRootlessNetns indicates the rootless netns can only be used as root + ErrNotRootlessNetns = errors.New("rootless netns cannot be used as root") + // NameRegex is a regular expression to validate names. // This must NOT be changed. NameRegex = regexp.Delayed("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go index 94087fd3..9e30975c 100644 --- a/vendor/github.com/containers/common/libnetwork/types/network.go +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -27,6 +27,10 @@ type ContainerNetwork interface { // Teardown will teardown the container network namespace. Teardown(namespacePath string, options TeardownOptions) error + // RunInRootlessNetns is used to run the given function in the rootless netns. + // Only used as rootless and should return an error as root. + RunInRootlessNetns(toRun func() error) error + // Drivers will return the list of supported network drivers // for this interface. 
Drivers() []string diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go index 70f90918..faea9c1a 100644 --- a/vendor/github.com/containers/common/libnetwork/util/filters.go +++ b/vendor/github.com/containers/common/libnetwork/util/filters.go @@ -7,6 +7,7 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/util" + "golang.org/x/exp/slices" ) func GenerateNetworkFilters(f map[string][]string) ([]types.FilterFunc, error) { @@ -32,7 +33,7 @@ func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, err case types.Driver: // matches network driver return func(net types.Network) bool { - return util.StringInSlice(net.Driver, filterValues) + return slices.Contains(filterValues, net.Driver) }, nil case "id": diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go index 435422c2..5cbb6ba9 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go @@ -1,5 +1,4 @@ //go:build linux && apparmor -// +build linux,apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go index 99e0e14e..b8212542 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go @@ -1,5 +1,4 @@ //go:build linux && apparmor -// +build linux,apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go index dacfc2f4..6c1ce46d 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux || !apparmor -// +build !linux !apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go index 6536d0f2..3b6a05f0 100644 --- a/vendor/github.com/containers/common/pkg/auth/auth.go +++ b/vendor/github.com/containers/common/pkg/auth/auth.go @@ -285,7 +285,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user username := opts.Username if username == "" { if opts.Stdin == nil { - return "", "", fmt.Errorf("cannot prompt for username without stdin") + return "", "", errors.New("cannot prompt for username without stdin") } if userFromAuthFile != "" { diff --git a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go index 3bf25e08..e0b0ac95 100644 --- a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go +++ b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go @@ -13,6 +13,7 @@ import ( "sync" "github.com/syndtr/gocapability/capability" + "golang.org/x/exp/slices" ) var ( @@ -54,16 +55,6 @@ func init() { } } -// stringInSlice determines if a string is in a string slice, returns bool -func stringInSlice(s string, sl []string) bool { - for _, i := range sl { - if i == s { - return true - } - } - return false -} - var ( boundingSetOnce sync.Once 
boundingSetRet []string @@ -115,7 +106,7 @@ func NormalizeCapabilities(caps []string) ([]string, error) { if !strings.HasPrefix(c, "CAP_") { c = "CAP_" + c } - if !stringInSlice(c, capabilityList) { + if !slices.Contains(capabilityList, c) { return nil, fmt.Errorf("%q: %w", c, ErrUnknownCapability) } normalized = append(normalized, c) @@ -127,7 +118,7 @@ func NormalizeCapabilities(caps []string) ([]string, error) { // ValidateCapabilities validates if caps only contains valid capabilities. func ValidateCapabilities(caps []string) error { for _, c := range caps { - if !stringInSlice(c, capabilityList) { + if !slices.Contains(capabilityList, c) { return fmt.Errorf("%q: %w", c, ErrUnknownCapability) } } @@ -159,8 +150,8 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { return nil, err } - if stringInSlice(All, capDrop) { - if stringInSlice(All, capAdd) { + if slices.Contains(capDrop, All) { + if slices.Contains(capAdd, All) { return nil, errors.New("adding all caps and removing all caps not allowed") } // "Drop" all capabilities; return what's in capAdd instead @@ -168,7 +159,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { return capAdd, nil } - if stringInSlice(All, capAdd) { + if slices.Contains(capAdd, All) { base, err = BoundingSet() if err != nil { return nil, err @@ -176,14 +167,14 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { capAdd = []string{} } else { for _, add := range capAdd { - if stringInSlice(add, capDrop) { + if slices.Contains(capDrop, add) { return nil, fmt.Errorf("capability %q cannot be dropped and added", add) } } } for _, drop := range capDrop { - if stringInSlice(drop, capAdd) { + if slices.Contains(capAdd, drop) { return nil, fmt.Errorf("capability %q cannot be dropped and added", drop) } } @@ -191,7 +182,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { caps := make([]string, 0, len(base)+len(capAdd)) // Drop any capabilities in capDrop that are in base for _, cap := range base { - if stringInSlice(cap, capDrop) { + if slices.Contains(capDrop, cap) { continue } caps = append(caps, cap) @@ -199,7 +190,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { // Add any capabilities in capAdd that are not in base for _, cap := range capAdd { - if stringInSlice(cap, base) { + if slices.Contains(base, cap) { continue } caps = append(caps, cap) diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go index dd18ed56..246f2c23 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go index 7605b500..3b553692 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups @@ -14,6 +13,9 @@ import ( "path/filepath" "strconv" "strings" + "sync" + "syscall" + "time" "github.com/containers/storage/pkg/unshare" systemdDbus "github.com/coreos/go-systemd/v22/dbus" @@ -22,6 +24,8 @@ import ( "github.com/opencontainers/runc/libcontainer/cgroups/fs2" "github.com/opencontainers/runc/libcontainer/configs" "github.com/sirupsen/logrus" + 
"golang.org/x/exp/maps" + "golang.org/x/sys/unix" ) var ( @@ -30,6 +34,10 @@ var ( // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environment ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments") ErrStatCgroup = errors.New("no cgroup available for gathering user statistics") + + isUnifiedOnce sync.Once + isUnified bool + isUnifiedErr error ) // CgroupControl controls a cgroup hierarchy @@ -73,12 +81,13 @@ const ( var handlers map[string]controllerHandler func init() { - handlers = make(map[string]controllerHandler) - handlers[CPU] = getCPUHandler() - handlers[CPUset] = getCpusetHandler() - handlers[Memory] = getMemoryHandler() - handlers[Pids] = getPidsHandler() - handlers[Blkio] = getBlkioHandler() + handlers = map[string]controllerHandler{ + CPU: getCPUHandler(), + CPUset: getCpusetHandler(), + Memory: getMemoryHandler(), + Pids: getPidsHandler(), + Blkio: getBlkioHandler(), + } } // getAvailableControllers get the available controllers @@ -94,7 +103,7 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) } // userSlice already contains '/' so not adding here basePath := cgroupRoot + userSlice - controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath) + controllersFile = basePath + "/cgroup.controllers" } controllersFileBytes, err := os.ReadFile(controllersFile) if err != nil { @@ -380,7 +389,7 @@ func Load(path string) (*CgroupControl, error) { // CreateSystemdUnit creates the systemd cgroup func (c *CgroupControl) CreateSystemdUnit(path string) error { if !c.systemd { - return fmt.Errorf("the cgroup controller is not using systemd") + return errors.New("the cgroup controller is not using systemd") } conn, err := systemdDbus.NewWithContext(context.TODO()) @@ -395,7 +404,7 @@ func (c *CgroupControl) CreateSystemdUnit(path string) error { // CreateSystemdUserUnit creates the systemd cgroup for the specified user func (c *CgroupControl) CreateSystemdUserUnit(path string, uid int) error { if !c.systemd { - return fmt.Errorf("the cgroup controller is not using systemd") + return errors.New("the cgroup controller is not using systemd") } conn, err := UserConnection(uid) @@ -492,10 +501,7 @@ func (c *CgroupControl) AddPid(pid int) error { return fs2.CreateCgroupPath(path, c.config) } - names := make([]string, 0, len(handlers)) - for n := range handlers { - names = append(names, n) - } + names := maps.Keys(handlers) for _, c := range c.additionalControllers { if !c.symlink { @@ -672,7 +678,7 @@ func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { path := filepath.Join(dir, file) parentPath := path if cgroupv2 { - parentPath = fmt.Sprintf("%s.effective", parentPath) + parentPath += ".effective" } data, err := os.ReadFile(parentPath) if err != nil { @@ -733,3 +739,139 @@ func SystemCPUUsage() (uint64, error) { } return total, nil } + +// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. 
+func IsCgroup2UnifiedMode() (bool, error) { + isUnifiedOnce.Do(func() { + var st syscall.Statfs_t + if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { + isUnified, isUnifiedErr = false, err + } else { + isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil + } + }) + return isUnified, isUnifiedErr +} + +// UserConnection returns an user connection to D-BUS +func UserConnection(uid int) (*systemdDbus.Conn, error) { + return systemdDbus.NewConnection(func() (*dbus.Conn, error) { + return dbusAuthConnection(uid, dbus.SessionBusPrivateNoAutoStartup) + }) +} + +// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the +// current cgroup. +func UserOwnsCurrentSystemdCgroup() (bool, error) { + uid := os.Geteuid() + + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return false, err + } + + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return false, err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.SplitN(line, ":", 3) + + if len(parts) < 3 { + continue + } + + var cgroupPath string + + if cgroup2 { + cgroupPath = filepath.Join(cgroupRoot, parts[2]) + } else { + if parts[1] != "name=systemd" { + continue + } + cgroupPath = filepath.Join(cgroupRoot, "systemd", parts[2]) + } + + st, err := os.Stat(cgroupPath) + if err != nil { + return false, err + } + s := st.Sys() + if s == nil { + return false, fmt.Errorf("stat cgroup path %s", cgroupPath) + } + + if int(s.(*syscall.Stat_t).Uid) != uid { + return false, nil + } + } + if err := scanner.Err(); err != nil { + return false, fmt.Errorf("parsing file /proc/self/cgroup: %w", err) + } + return true, nil +} + +// rmDirRecursively delete recursively a cgroup directory. +// It differs from os.RemoveAll as it doesn't attempt to unlink files. +// On cgroupfs we are allowed only to rmdir empty directories. 
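With cgroups_supported.go deleted further down, IsCgroup2UnifiedMode now lives here and callers keep the same API. A short usage sketch; the sync.Once memoization above means repeated calls cost a single statfs syscall per process:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/cgroups"
)

func main() {
	unified, err := cgroups.IsCgroup2UnifiedMode()
	if err != nil {
		panic(err)
	}
	fmt.Println("cgroup v2 unified mode:", unified)
}

The rmDirRecursively implementation continues below.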
+func rmDirRecursively(path string) error { + killProcesses := func(signal syscall.Signal) { + if signal == unix.SIGKILL { + if err := os.WriteFile(filepath.Join(path, "cgroup.kill"), []byte("1"), 0o600); err == nil { + return + } + } + // kill all the processes that are still part of the cgroup + if procs, err := os.ReadFile(filepath.Join(path, "cgroup.procs")); err == nil { + for _, pidS := range strings.Split(string(procs), "\n") { + if pid, err := strconv.Atoi(pidS); err == nil { + _ = unix.Kill(pid, signal) + } + } + } + } + + if err := os.Remove(path); err == nil || errors.Is(err, os.ErrNotExist) { + return nil + } + entries, err := os.ReadDir(path) + if err != nil { + return err + } + for _, i := range entries { + if i.IsDir() { + if err := rmDirRecursively(filepath.Join(path, i.Name())); err != nil { + return err + } + } + } + + attempts := 0 + for { + err := os.Remove(path) + if err == nil || errors.Is(err, os.ErrNotExist) { + return nil + } + if errors.Is(err, unix.EBUSY) { + // send a SIGTERM after 3 second + if attempts == 300 { + killProcesses(unix.SIGTERM) + } + // send SIGKILL after 8 seconds + if attempts == 800 { + killProcesses(unix.SIGKILL) + } + // give up after 10 seconds + if attempts < 1000 { + time.Sleep(time.Millisecond * 10) + attempts++ + continue + } + } + return fmt.Errorf("remove %s: %w", path, err) + } +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go deleted file mode 100644 index 5c0cac64..00000000 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go +++ /dev/null @@ -1,163 +0,0 @@ -//go:build linux -// +build linux - -package cgroups - -import ( - "bufio" - "errors" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" - "golang.org/x/sys/unix" -) - -var ( - isUnifiedOnce sync.Once - isUnified bool - isUnifiedErr error -) - -// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. -func IsCgroup2UnifiedMode() (bool, error) { - isUnifiedOnce.Do(func() { - var st syscall.Statfs_t - if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { - isUnified, isUnifiedErr = false, err - } else { - isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil - } - }) - return isUnified, isUnifiedErr -} - -// UserConnection returns an user connection to D-BUS -func UserConnection(uid int) (*systemdDbus.Conn, error) { - return systemdDbus.NewConnection(func() (*dbus.Conn, error) { - return dbusAuthConnection(uid, dbus.SessionBusPrivateNoAutoStartup) - }) -} - -// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the -// current cgroup. 
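rmDirRecursively polls in 10 ms steps, so the escalation comments line up: SIGTERM at attempt 300 (about 3 s), SIGKILL at attempt 800 (about 8 s), and a hard failure after 1000 attempts (about 10 s). The cgroup v2 fast path writes to cgroup.kill (kernel 5.14 and later), which kills every member at once and skips the cgroup.procs walk; a minimal sketch with a hypothetical cgroup path:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// killAll asks the kernel to kill every process in the cgroup in one
// write; only available on cgroup v2. The directory is hypothetical.
func killAll(cgroupDir string) error {
	return os.WriteFile(filepath.Join(cgroupDir, "cgroup.kill"), []byte("1"), 0o600)
}

func main() {
	fmt.Println(killAll("/sys/fs/cgroup/example.scope"))
}

The remainder of this hunk removes the old cgroups_supported.go copy of these helpers.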
-func UserOwnsCurrentSystemdCgroup() (bool, error) { - uid := os.Geteuid() - - cgroup2, err := IsCgroup2UnifiedMode() - if err != nil { - return false, err - } - - f, err := os.Open("/proc/self/cgroup") - if err != nil { - return false, err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, ":", 3) - - if len(parts) < 3 { - continue - } - - var cgroupPath string - - if cgroup2 { - cgroupPath = filepath.Join(cgroupRoot, parts[2]) - } else { - if parts[1] != "name=systemd" { - continue - } - cgroupPath = filepath.Join(cgroupRoot, "systemd", parts[2]) - } - - st, err := os.Stat(cgroupPath) - if err != nil { - return false, err - } - s := st.Sys() - if s == nil { - return false, fmt.Errorf("stat cgroup path %s", cgroupPath) - } - - if int(s.(*syscall.Stat_t).Uid) != uid { - return false, nil - } - } - if err := scanner.Err(); err != nil { - return false, fmt.Errorf("parsing file /proc/self/cgroup: %w", err) - } - return true, nil -} - -// rmDirRecursively delete recursively a cgroup directory. -// It differs from os.RemoveAll as it doesn't attempt to unlink files. -// On cgroupfs we are allowed only to rmdir empty directories. -func rmDirRecursively(path string) error { - killProcesses := func(signal syscall.Signal) { - if signal == unix.SIGKILL { - if err := os.WriteFile(filepath.Join(path, "cgroup.kill"), []byte("1"), 0o600); err == nil { - return - } - } - // kill all the processes that are still part of the cgroup - if procs, err := os.ReadFile(filepath.Join(path, "cgroup.procs")); err == nil { - for _, pidS := range strings.Split(string(procs), "\n") { - if pid, err := strconv.Atoi(pidS); err == nil { - _ = unix.Kill(pid, signal) - } - } - } - } - - if err := os.Remove(path); err == nil || errors.Is(err, os.ErrNotExist) { - return nil - } - entries, err := os.ReadDir(path) - if err != nil { - return err - } - for _, i := range entries { - if i.IsDir() { - if err := rmDirRecursively(filepath.Join(path, i.Name())); err != nil { - return err - } - } - } - - attempts := 0 - for { - err := os.Remove(path) - if err == nil || errors.Is(err, os.ErrNotExist) { - return nil - } - if errors.Is(err, unix.EBUSY) { - // send a SIGTERM after 3 second - if attempts == 300 { - killProcesses(unix.SIGTERM) - } - // send SIGKILL after 8 seconds - if attempts == 800 { - killProcesses(unix.SIGKILL) - } - // give up after 10 seconds - if attempts < 1000 { - time.Sleep(time.Millisecond * 10) - attempts++ - continue - } - } - return fmt.Errorf("remove %s: %w", path, err) - } -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go index f2558728..16029121 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go @@ -1,13 +1,9 @@ //go:build !linux -// +build !linux package cgroups import ( - "fmt" "os" - - systemdDbus "github.com/coreos/go-systemd/v22/dbus" ) // IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. 
@@ -24,8 +20,3 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) { func rmDirRecursively(path string) error { return os.RemoveAll(path) } - -// UserConnection returns an user connection to D-BUS -func UserConnection(uid int) (*systemdDbus.Conn, error) { - return nil, fmt.Errorf("systemd d-bus is not supported on this platform") -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go index 4931be6e..c10df796 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go index c55c7686..28b46fb2 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go b/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go index 3335cdff..b82945c5 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go b/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go index a8163ce4..61ee8539 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd.go b/vendor/github.com/containers/common/pkg/cgroups/systemd.go deleted file mode 100644 index 80a7bde2..00000000 --- a/vendor/github.com/containers/common/pkg/cgroups/systemd.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build !linux -// +build !linux - -package cgroups - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" -) - -func systemdCreate(path string, c *systemdDbus.Conn) error { - slice, name := filepath.Split(path) - slice = strings.TrimSuffix(slice, "/") - - var lastError error - for i := 0; i < 2; i++ { - properties := []systemdDbus.Property{ - systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), - systemdDbus.PropWants(slice), - } - pMap := map[string]bool{ - "DefaultDependencies": false, - "MemoryAccounting": true, - "CPUAccounting": true, - "BlockIOAccounting": true, - } - if i == 0 { - pMap["Delegate"] = true - } - for k, v := range pMap { - p := systemdDbus.Property{ - Name: k, - Value: dbus.MakeVariant(v), - } - properties = append(properties, p) - } - - ch := make(chan string) - _, err := c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch) - if err != nil { - lastError = err - continue - } - <-ch - return nil - } - return lastError -} - -/* -systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that -has the following license: -Copyright The containerd Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -func systemdDestroyConn(path string, c *systemdDbus.Conn) error { - name := filepath.Base(path) - - ch := make(chan string) - _, err := c.StopUnitContext(context.TODO(), name, "replace", ch) - if err != nil { - return err - } - <-ch - return nil -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go index 906c716d..cde2d596 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups @@ -26,7 +25,7 @@ func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Con var lastError error for i := 0; i < 2; i++ { properties := []systemdDbus.Property{ - systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), + systemdDbus.PropDescription("cgroup " + name), systemdDbus.PropWants(slice), } var ioString string diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go index ed9f0761..be9d1158 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go @@ -1,9 +1,9 @@ //go:build linux -// +build linux package cgroups import ( + "bufio" "bytes" "errors" "fmt" @@ -11,6 +11,7 @@ import ( "path" "path/filepath" "strings" + "sync" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" @@ -143,3 +144,171 @@ func SetBlkioThrottle(res *configs.Resources, cgroupPath string) error { } return nil } + +// Code below was moved from podman/utils/utils_supported.go and should properly better +// integrated here as some parts may be redundant. + +func getCgroupProcess(procFile string, allowRoot bool) (string, error) { + f, err := os.Open(procFile) + if err != nil { + return "", err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + cgroup := "" + for scanner.Scan() { + line := scanner.Text() + parts := strings.SplitN(line, ":", 3) + if len(parts) != 3 { + return "", fmt.Errorf("cannot parse cgroup line %q", line) + } + if strings.HasPrefix(line, "0::") { + cgroup = line[3:] + break + } + if len(parts[2]) > len(cgroup) { + cgroup = parts[2] + } + } + if len(cgroup) == 0 || (!allowRoot && cgroup == "/") { + return "", fmt.Errorf("could not find cgroup mount in %q", procFile) + } + return cgroup, nil +} + +// GetOwnCgroup returns the cgroup for the current process. +func GetOwnCgroup() (string, error) { + return getCgroupProcess("/proc/self/cgroup", true) +} + +func GetOwnCgroupDisallowRoot() (string, error) { + return getCgroupProcess("/proc/self/cgroup", false) +} + +// GetCgroupProcess returns the cgroup for the specified process process. +func GetCgroupProcess(pid int) (string, error) { + return getCgroupProcess(fmt.Sprintf("/proc/%d/cgroup", pid), true) +} + +// MoveUnderCgroupSubtree moves the PID under a cgroup subtree. 
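getCgroupProcess, added above, prefers the cgroup v2 entry: a line starting with "0::" wins outright, otherwise the longest v1 controller path is kept. A standalone sketch of that selection rule (the real helper errors on malformed lines instead of skipping them):

package main

import (
	"fmt"
	"strings"
)

func pickCgroup(lines []string) string {
	cgroup := ""
	for _, line := range lines {
		parts := strings.SplitN(line, ":", 3)
		if len(parts) != 3 {
			continue // simplified; the real code returns an error here
		}
		if strings.HasPrefix(line, "0::") {
			return line[3:] // cgroup v2 entry wins outright
		}
		if len(parts[2]) > len(cgroup) {
			cgroup = parts[2] // otherwise keep the longest v1 path
		}
	}
	return cgroup
}

func main() {
	fmt.Println(pickCgroup([]string{"12:cpu:/short", "0::/user.slice/app.scope"}))
}

MoveUnderCgroupSubtree, declared next, builds on the same /proc/self/cgroup parsing.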
+func MoveUnderCgroupSubtree(subtree string) error { + return MoveUnderCgroup("", subtree, nil) +} + +// MoveUnderCgroup moves a group of processes to a new cgroup. +// If cgroup is the empty string, then the current calling process cgroup is used. +// If processes is empty, then the processes from the current cgroup are moved. +func MoveUnderCgroup(cgroup, subtree string, processes []uint32) error { + procFile := "/proc/self/cgroup" + f, err := os.Open(procFile) + if err != nil { + return err + } + defer f.Close() + + unifiedMode, err := IsCgroup2UnifiedMode() + if err != nil { + return err + } + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.SplitN(line, ":", 3) + if len(parts) != 3 { + return fmt.Errorf("cannot parse cgroup line %q", line) + } + + // root cgroup, skip it + if parts[2] == "/" && !(unifiedMode && parts[1] == "") { + continue + } + + cgroupRoot := "/sys/fs/cgroup" + // Special case the unified mount on hybrid cgroup and named hierarchies. + // This works on Fedora 31, but we should really parse the mounts to see + // where the cgroup hierarchy is mounted. + if parts[1] == "" && !unifiedMode { + // If it is not using unified mode, the cgroup v2 hierarchy is + // usually mounted under /sys/fs/cgroup/unified + cgroupRoot = filepath.Join(cgroupRoot, "unified") + + // Ignore the unified mount if it doesn't exist + if _, err := os.Stat(cgroupRoot); err != nil && os.IsNotExist(err) { + continue + } + } else if parts[1] != "" { + // Assume the controller is mounted at /sys/fs/cgroup/$CONTROLLER. + controller := strings.TrimPrefix(parts[1], "name=") + cgroupRoot = filepath.Join(cgroupRoot, controller) + } + + parentCgroup := cgroup + if parentCgroup == "" { + parentCgroup = parts[2] + } + newCgroup := filepath.Join(cgroupRoot, parentCgroup, subtree) + if err := os.MkdirAll(newCgroup, 0o755); err != nil && !os.IsExist(err) { + return err + } + + f, err := os.OpenFile(filepath.Join(newCgroup, "cgroup.procs"), os.O_RDWR, 0o755) + if err != nil { + return err + } + defer f.Close() + + if len(processes) > 0 { + for _, pid := range processes { + if _, err := f.WriteString(fmt.Sprintf("%d\n", pid)); err != nil { + logrus.Debugf("Cannot move process %d to cgroup %q: %v", pid, newCgroup, err) + } + } + } else { + processesData, err := os.ReadFile(filepath.Join(cgroupRoot, parts[2], "cgroup.procs")) + if err != nil { + return err + } + for _, pid := range bytes.Split(processesData, []byte("\n")) { + if len(pid) == 0 { + continue + } + if _, err := f.Write(pid); err != nil { + logrus.Debugf("Cannot move process %s to cgroup %q: %v", string(pid), newCgroup, err) + } + } + } + } + return nil +} + +var ( + maybeMoveToSubCgroupSync sync.Once + maybeMoveToSubCgroupSyncErr error +) + +// MaybeMoveToSubCgroup moves the current process in a sub cgroup when +// it is running in the root cgroup on a system that uses cgroupv2. 
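MaybeMoveToSubCgroup, whose implementation follows, exists because of the cgroup v2 "no internal processes" rule: a cgroup cannot have both member processes and enabled controllers, so a daemon launched in the root cgroup relocates itself into an "init" child before delegating controllers. A plausible call site, assuming only this package (Linux-only, since utils_linux.go is build-tagged for linux):

package main

import (
	"fmt"

	"github.com/containers/common/pkg/cgroups"
)

func main() {
	if err := cgroups.MaybeMoveToSubCgroup(); err != nil {
		fmt.Println("could not move to sub-cgroup:", err)
	}
}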
+func MaybeMoveToSubCgroup() error { + maybeMoveToSubCgroupSync.Do(func() { + unifiedMode, err := IsCgroup2UnifiedMode() + if err != nil { + maybeMoveToSubCgroupSyncErr = err + return + } + if !unifiedMode { + maybeMoveToSubCgroupSyncErr = nil + return + } + cgroup, err := GetOwnCgroup() + if err != nil { + maybeMoveToSubCgroupSyncErr = err + return + } + if cgroup == "/" { + maybeMoveToSubCgroupSyncErr = MoveUnderCgroupSubtree("init") + } + }) + return maybeMoveToSubCgroupSyncErr +} diff --git a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go index f61bd3bb..56269aa4 100644 --- a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go +++ b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package cgroupv2 diff --git a/vendor/github.com/containers/common/pkg/chown/chown_unix.go b/vendor/github.com/containers/common/pkg/chown/chown_unix.go index be4b8cfa..b598ac92 100644 --- a/vendor/github.com/containers/common/pkg/chown/chown_unix.go +++ b/vendor/github.com/containers/common/pkg/chown/chown_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package chown diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index 75b917f0..15c91860 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -9,16 +9,14 @@ import ( "runtime" "strings" - "github.com/BurntSushi/toml" "github.com/containers/common/internal/attributedstring" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/capabilities" - "github.com/containers/common/pkg/util" - "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/unshare" units "github.com/docker/go-units" selinux "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) const ( @@ -31,7 +29,7 @@ const ( bindirPrefix = "$BINDIR" ) -var validImageVolumeModes = []string{_typeBind, "tmpfs", "ignore"} +var validImageVolumeModes = []string{"anonymous", "tmpfs", "ignore"} // ProxyEnv is a list of Proxy Environment variables var ProxyEnv = []string{ @@ -154,6 +152,13 @@ type ContainersConfig struct { // Deprecated: Do not use this field directly use conf.FindInitBinary() instead. InitPath string `toml:"init_path,omitempty"` + // InterfaceName tells container runtimes how to set interface names + // inside containers. + // The only valid value at the moment is "device" that indicates the + // interface name should be set as the network_interface name from + // the network config. + InterfaceName string `toml:"interface_name,omitempty"` + // IPCNS way to create a ipc namespace for the container IPCNS string `toml:"ipcns,omitempty"` @@ -365,11 +370,6 @@ type EngineConfig struct { // LockType is the type of locking to use. LockType string `toml:"lock_type,omitempty"` - // MachineEnabled indicates if Podman is running in a podman-machine VM - // - // This method is soft deprecated, use machine.IsPodmanMachine instead - MachineEnabled bool `toml:"machine_enabled,omitempty"` - // MultiImageArchive - if true, the container engine allows for storing // archives (e.g., of the docker-archive transport) with multiple // images. By default, Podman creates single-image archives. 
@@ -416,6 +416,14 @@ type EngineConfig struct { // Indicates whether the application should be running in Remote mode Remote bool `toml:"remote,omitempty"` + // Number of times to retry pulling/pushing images in case of failure + Retry uint `toml:"retry,omitempty"` + + // Delay between retries in case pulling/pushing image fails + // If set, container engines will retry at the set interval, + // otherwise they delay 2 seconds and then exponentially back off. + RetryDelay string `toml:"retry_delay,omitempty"` + // RemoteURI is deprecated, see ActiveService // RemoteURI containers connection information used to connect to remote system. RemoteURI string `toml:"remote_uri,omitempty"` @@ -567,6 +575,9 @@ type NetworkConfig struct { // NetavarkPluginDirs is a list of directories which contain netavark plugins. NetavarkPluginDirs attributedstring.Slice `toml:"netavark_plugin_dirs,omitempty"` + // FirewallDriver is the firewall driver to be used + FirewallDriver string `toml:"firewall_driver,omitempty"` + // DefaultNetwork is the network name of the default network // to attach pods to. DefaultNetwork string `toml:"default_network,omitempty"` @@ -652,14 +663,16 @@ type MachineConfig struct { Volumes attributedstring.Slice `toml:"volumes,omitempty"` // Provider is the virtualization provider used to run podman-machine VM Provider string `toml:"provider,omitempty"` + // Rosetta is the flag to enable Rosetta in the podman-machine VM on Apple Silicon + Rosetta bool `toml:"rosetta,omitempty"` } // FarmConfig represents the "farm" TOML config tables type FarmConfig struct { // Default is the default farm to be used when farming out builds - Default string `toml:"default,omitempty"` + Default string `json:",omitempty" toml:"default,omitempty"` // List is a map of farms created where key=farm-name and value=list of connections - List map[string][]string `toml:"list,omitempty"` + List map[string][]string `json:",omitempty" toml:"list,omitempty"` } // Destination represents destination for remote service @@ -668,10 +681,10 @@ type Destination struct { URI string `toml:"uri"` // Identity file with ssh key, optional - Identity string `toml:"identity,omitempty"` + Identity string `json:",omitempty" toml:"identity,omitempty"` // isMachine describes if the remote destination is a machine. 
- IsMachine bool `toml:"is_machine,omitempty"` + IsMachine bool `json:",omitempty" toml:"is_machine,omitempty"` } // Consumes container image's os and arch and returns if any dedicated runtime was @@ -811,6 +824,10 @@ func (c *ContainersConfig) Validate() error { return err } + if err := c.validateInterfaceName(); err != nil { + return err + } + if err := c.validateTZ(); err != nil { return err } @@ -915,7 +932,7 @@ func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string { } // Capabilities returns the capabilities parses the Add and Drop capability -// list from the default capabiltiies for the container +// list from the default capabilities for the container func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []string) ([]string, error) { userNotRoot := func(user string) bool { if user == "" || user == "root" || user == "0" { @@ -994,82 +1011,6 @@ func IsValidDeviceMode(mode string) bool { return true } -func rootlessConfigPath() (string, error) { - if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" { - return filepath.Join(configHome, _configPath), nil - } - home, err := unshare.HomeDir() - if err != nil { - return "", err - } - - return filepath.Join(home, UserOverrideContainersConfig), nil -} - -func Path() string { - if path := os.Getenv("CONTAINERS_CONF"); path != "" { - return path - } - if unshare.IsRootless() { - if rpath, err := rootlessConfigPath(); err == nil { - return rpath - } - return "$HOME/" + UserOverrideContainersConfig - } - return OverrideContainersConfig -} - -// ReadCustomConfig reads the custom config and only generates a config based on it -// If the custom config file does not exists, function will return an empty config -func ReadCustomConfig() (*Config, error) { - path, err := customConfigFile() - if err != nil { - return nil, err - } - newConfig := &Config{} - if _, err := os.Stat(path); err == nil { - if err := readConfigFromFile(path, newConfig); err != nil { - return nil, err - } - } else { - if !errors.Is(err, os.ErrNotExist) { - return nil, err - } - } - // Let's always initialize the farm list so it is never nil - if newConfig.Farms.List == nil { - newConfig.Farms.List = make(map[string][]string) - } - return newConfig, nil -} - -// Write writes the configuration to the default file -func (c *Config) Write() error { - var err error - path, err := customConfigFile() - if err != nil { - return err - } - if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { - return err - } - - opts := &ioutils.AtomicFileWriterOptions{ExplicitCommit: true} - configFile, err := ioutils.NewAtomicFileWriterWithOpts(path, 0o644, opts) - if err != nil { - return err - } - defer configFile.Close() - - enc := toml.NewEncoder(configFile) - if err := enc.Encode(c); err != nil { - return err - } - - // If no errors commit the changes to the config file - return configFile.Commit() -} - // Reload clean the cached config and reloads the configuration from containers.conf files // This function is meant to be used for long-running processes that need to reload potential changes made to // the cached containers.conf files. 
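The Retry and RetryDelay engine fields added above document two modes: a fixed interval when retry_delay is set, otherwise a 2 s delay with exponential back-off. A hypothetical helper honoring those semantics, not the engine's actual pull/push path:

package main

import (
	"errors"
	"fmt"
	"time"
)

func withRetry(op func() error, retries uint, retryDelay string) error {
	delay, fixed := 2*time.Second, false
	if retryDelay != "" {
		d, err := time.ParseDuration(retryDelay)
		if err != nil {
			return fmt.Errorf("parsing retry_delay: %w", err)
		}
		delay, fixed = d, true
	}
	err := op()
	for i := uint(0); i < retries && err != nil; i++ {
		time.Sleep(delay)
		if !fixed {
			delay *= 2 // exponential back-off when no fixed delay is set
		}
		err = op()
	}
	return err
}

func main() {
	fmt.Println(withRetry(func() error { return errors.New("flaky registry") }, 3, "500ms"))
}

Note also that the ReadCustomConfig and Write methods removed above are superseded by the connections-file API introduced in connections.go further down.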
@@ -1225,7 +1166,7 @@ func ValidateImageVolumeMode(mode string) error { if mode == "" { return nil } - if util.StringInSlice(mode, validImageVolumeModes) { + if slices.Contains(validImageVolumeModes, mode) { return nil } @@ -1242,7 +1183,7 @@ func (c *Config) FindInitBinary() (string, error) { if c.Engine.InitPath != "" { return c.Engine.InitPath, nil } - // keep old default working to guarantee backwards comapt + // keep old default working to guarantee backwards compat if _, err := os.Stat(DefaultInitPath); err == nil { return DefaultInitPath, nil } diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go index 1b40e2ba..9982d799 100644 --- a/vendor/github.com/containers/common/pkg/config/config_darwin.go +++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go @@ -1,9 +1,5 @@ package config -import ( - "os" -) - const ( // OverrideContainersConfig holds the default config path overridden by the root user OverrideContainersConfig = "/etc/" + _configPath @@ -14,23 +10,8 @@ const ( // DefaultSignaturePolicyPath is the default value for the // policy.json file. DefaultSignaturePolicyPath = "/etc/containers/policy.json" - - // Mount type for mounting host dir - _typeBind = "bind" ) -// podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations. -func customConfigFile() (string, error) { - if path, found := os.LookupEnv("CONTAINERS_CONF"); found { - return path, nil - } - return rootlessConfigPath() -} - -func ifRootlessConfigPath() (string, error) { - return rootlessConfigPath() -} - var defaultHelperBinariesDir = []string{ // Relative to the binary directory "$BINDIR/../libexec/podman", diff --git a/vendor/github.com/containers/common/pkg/config/config_freebsd.go b/vendor/github.com/containers/common/pkg/config/config_freebsd.go index 48bb0994..5b7f55a7 100644 --- a/vendor/github.com/containers/common/pkg/config/config_freebsd.go +++ b/vendor/github.com/containers/common/pkg/config/config_freebsd.go @@ -1,9 +1,5 @@ package config -import ( - "os" -) - const ( // OverrideContainersConfig holds the default config path overridden by the root user OverrideContainersConfig = "/usr/local/etc/" + _configPath @@ -14,23 +10,8 @@ const ( // DefaultSignaturePolicyPath is the default value for the // policy.json file. DefaultSignaturePolicyPath = "/usr/local/etc/containers/policy.json" - - // Mount type for mounting host dir - _typeBind = "nullfs" ) -// podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations. -func customConfigFile() (string, error) { - if path, found := os.LookupEnv("CONTAINERS_CONF"); found { - return path, nil - } - return rootlessConfigPath() -} - -func ifRootlessConfigPath() (string, error) { - return rootlessConfigPath() -} - var defaultHelperBinariesDir = []string{ "/usr/local/bin", "/usr/local/libexec/podman", diff --git a/vendor/github.com/containers/common/pkg/config/config_linux.go b/vendor/github.com/containers/common/pkg/config/config_linux.go index f294402a..66d19346 100644 --- a/vendor/github.com/containers/common/pkg/config/config_linux.go +++ b/vendor/github.com/containers/common/pkg/config/config_linux.go @@ -1,9 +1,6 @@ package config import ( - "os" - - "github.com/containers/storage/pkg/unshare" selinux "github.com/opencontainers/selinux/go-selinux" ) @@ -17,40 +14,12 @@ const ( // DefaultSignaturePolicyPath is the default value for the // policy.json file. 
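ValidateImageVolumeMode above is one of many call sites migrated from util.StringInSlice to golang.org/x/exp/slices; note the argument order flips from (element, slice) to (slice, element). A quick sketch that also shows the "bind" to "anonymous" rename in validImageVolumeModes:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	validImageVolumeModes := []string{"anonymous", "tmpfs", "ignore"}
	fmt.Println(slices.Contains(validImageVolumeModes, "bind"))      // false: renamed
	fmt.Println(slices.Contains(validImageVolumeModes, "anonymous")) // true
}

The config_linux.go constants continue below.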
DefaultSignaturePolicyPath = "/etc/containers/policy.json" - - // Mount type for mounting host dir - _typeBind = "bind" ) func selinuxEnabled() bool { return selinux.GetEnabled() } -func customConfigFile() (string, error) { - if path, found := os.LookupEnv("CONTAINERS_CONF"); found { - return path, nil - } - if unshare.GetRootlessUID() > 0 { - path, err := rootlessConfigPath() - if err != nil { - return "", err - } - return path, nil - } - return OverrideContainersConfig, nil -} - -func ifRootlessConfigPath() (string, error) { - if unshare.GetRootlessUID() > 0 { - path, err := rootlessConfigPath() - if err != nil { - return "", err - } - return path, nil - } - return "", nil -} - var defaultHelperBinariesDir = []string{ "/usr/local/libexec/podman", "/usr/local/lib/podman", diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index dae3ea0d..e9826d62 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package config @@ -43,6 +42,14 @@ func (c *ContainersConfig) validateDevices() error { return nil } +func (c *ContainersConfig) validateInterfaceName() error { + if c.InterfaceName == "device" || c.InterfaceName == "" { + return nil + } + + return fmt.Errorf("invalid interface_name option %s", c.InterfaceName) +} + func (c *ContainersConfig) validateUlimits() error { for _, u := range c.DefaultUlimits.Get() { ul, err := units.ParseUlimit(u) @@ -67,6 +74,13 @@ func (c *ContainersConfig) validateTZ() error { "/etc/zoneinfo", } + // Allow using TZDIR to override the lookupPaths. Ref: + // https://sourceware.org/git/?p=glibc.git;a=blob;f=time/tzfile.c;h=8a923d0cccc927a106dc3e3c641be310893bab4e;hb=HEAD#l149 + tzdir := os.Getenv("TZDIR") + if tzdir != "" { + lookupPaths = []string{tzdir} + } + for _, paths := range lookupPaths { zonePath := filepath.Join(paths, c.TZ) if _, err := os.Stat(zonePath); err == nil { diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go index bff869ef..f7b3d1a0 100644 --- a/vendor/github.com/containers/common/pkg/config/config_remote.go +++ b/vendor/github.com/containers/common/pkg/config/config_remote.go @@ -1,5 +1,4 @@ //go:build remote -// +build remote package config @@ -21,6 +20,10 @@ func (c *ContainersConfig) validateDevices() error { return nil } +func (c *ContainersConfig) validateInterfaceName() error { + return nil +} + func (c *ContainersConfig) validateUlimits() error { return nil } diff --git a/vendor/github.com/containers/common/pkg/config/config_unix.go b/vendor/github.com/containers/common/pkg/config/config_unix.go new file mode 100644 index 00000000..bd165278 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/config_unix.go @@ -0,0 +1,25 @@ +//go:build !windows + +package config + +import ( + "os" + "path/filepath" + + "github.com/containers/storage/pkg/unshare" +) + +// userConfigPath returns the path to the users local config that is +// not shared with other users. It uses $XDG_CONFIG_HOME/containers... +// if set or $HOME/.config/containers... if not. 
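The new userConfigPath, whose body follows, replaces the per-OS customConfigFile and ifRootlessConfigPath pairs deleted above. Its lookup order, sketched with the standard library only (the real helper resolves the home directory via unshare.HomeDir instead):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func resolveUserConfig() (string, error) {
	if x := os.Getenv("XDG_CONFIG_HOME"); x != "" {
		return filepath.Join(x, "containers", "containers.conf"), nil
	}
	home, err := os.UserHomeDir() // stand-in for unshare.HomeDir()
	if err != nil {
		return "", err
	}
	return filepath.Join(home, ".config", "containers", "containers.conf"), nil
}

func main() {
	p, err := resolveUserConfig()
	fmt.Println(p, err)
}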
+func userConfigPath() (string, error) { + if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" { + return filepath.Join(configHome, _configPath), nil + } + home, err := unshare.HomeDir() + if err != nil { + return "", err + } + + return filepath.Join(home, UserOverrideContainersConfig), nil +} diff --git a/vendor/github.com/containers/common/pkg/config/config_unsupported.go b/vendor/github.com/containers/common/pkg/config/config_unsupported.go index 64e4fcfc..341225f1 100644 --- a/vendor/github.com/containers/common/pkg/config/config_unsupported.go +++ b/vendor/github.com/containers/common/pkg/config/config_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package config diff --git a/vendor/github.com/containers/common/pkg/config/config_windows.go b/vendor/github.com/containers/common/pkg/config/config_windows.go index 67f0aab2..9011687e 100644 --- a/vendor/github.com/containers/common/pkg/config/config_windows.go +++ b/vendor/github.com/containers/common/pkg/config/config_windows.go @@ -17,15 +17,9 @@ const ( _typeBind = "bind" ) -// podman remote clients on windows cannot use unshare.isRootless() to determine the configuration file locations. -func customConfigFile() (string, error) { - if path, found := os.LookupEnv("CONTAINERS_CONF"); found { - return path, nil - } - return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil -} - -func ifRootlessConfigPath() (string, error) { +// userConfigPath returns the path to the users local config that is +// not shared with other users. It uses $APPDATA/containers... +func userConfigPath() (string, error) { return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil } diff --git a/vendor/github.com/containers/common/pkg/config/connections.go b/vendor/github.com/containers/common/pkg/config/connections.go new file mode 100644 index 00000000..d7c2c7d8 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/config/connections.go @@ -0,0 +1,312 @@ +package config + +import ( + "encoding/json" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/lockfile" +) + +const connectionsFile = "podman-connections.json" + +// connectionsConfigFile returns the path to the rw connections config file +func connectionsConfigFile() (string, error) { + if path, found := os.LookupEnv("PODMAN_CONNECTIONS_CONF"); found { + return path, nil + } + path, err := userConfigPath() + if err != nil { + return "", err + } + // file is stored next to containers.conf + return filepath.Join(filepath.Dir(path), connectionsFile), nil +} + +type ConnectionConfig struct { + Default string `json:",omitempty"` + Connections map[string]Destination `json:",omitempty"` +} + +type ConnectionsFile struct { + Connection ConnectionConfig `json:",omitempty"` + Farm FarmConfig `json:",omitempty"` +} + +type Connection struct { + // Name of the connection + Name string + + // Destination for this connection + Destination + + // Default if this connection is the default + Default bool + + // ReadWrite if true the connection is stored in the connections file + ReadWrite bool +} + +type Farm struct { + // Name of the farm + Name string + + // Connections + Connections []string + + // Default if this is the default farm + Default bool + + // ReadWrite if true the farm is stored in the connections file + ReadWrite bool +} + +func readConnectionConf(path string) (*ConnectionsFile, error) { + conf := new(ConnectionsFile) + f, err := os.Open(path) + if err != nil { + // 
return empty config if file does not exists + if errors.Is(err, fs.ErrNotExist) { + return conf, nil + } + + return nil, err + } + defer f.Close() + + err = json.NewDecoder(f).Decode(conf) + if err != nil { + return nil, fmt.Errorf("parse %q: %w", path, err) + } + return conf, nil +} + +func writeConnectionConf(path string, conf *ConnectionsFile) error { + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + return err + } + + opts := &ioutils.AtomicFileWriterOptions{ExplicitCommit: true} + configFile, err := ioutils.NewAtomicFileWriterWithOpts(path, 0o644, opts) + if err != nil { + return err + } + defer configFile.Close() + + err = json.NewEncoder(configFile).Encode(conf) + if err != nil { + return err + } + + // If no errors commit the changes to the config file + return configFile.Commit() +} + +// EditConnectionConfig must be used to edit the connections config. +// The function will read and write the file automatically and the +// callback function just needs to modify the cfg as needed. +func EditConnectionConfig(callback func(cfg *ConnectionsFile) error) error { + path, err := connectionsConfigFile() + if err != nil { + return err + } + + lockPath := path + ".lock" + lock, err := lockfile.GetLockFile(lockPath) + if err != nil { + return fmt.Errorf("obtain lock file: %w", err) + } + lock.Lock() + defer lock.Unlock() + + conf, err := readConnectionConf(path) + if err != nil { + return fmt.Errorf("read connections file: %w", err) + } + if conf.Farm.List == nil { + conf.Farm.List = make(map[string][]string) + } + + if err := callback(conf); err != nil { + return err + } + + return writeConnectionConf(path, conf) +} + +func makeConnection(name string, dst Destination, def, readWrite bool) *Connection { + return &Connection{ + Name: name, + Destination: dst, + Default: def, + ReadWrite: readWrite, + } +} + +// GetConnection return the connection for the given name or if def is set to true then return the default connection. 
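EditConnectionConfig, defined above, is the one sanctioned write path for podman-connections.json: it takes the adjacent .lock file, reads the current state, applies the callback, and commits atomically. A hypothetical edit registering a connection and making it the default:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	err := config.EditConnectionConfig(func(cfg *config.ConnectionsFile) error {
		if cfg.Connection.Connections == nil {
			cfg.Connection.Connections = map[string]config.Destination{}
		}
		cfg.Connection.Connections["staging"] = config.Destination{
			URI: "ssh://core@staging.example.com/run/podman/podman.sock", // hypothetical
		}
		cfg.Connection.Default = "staging"
		return nil
	})
	fmt.Println(err)
}

GetConnection, continued below, resolves names against this file first and the static service destinations second.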
+func (c *Config) GetConnection(name string, def bool) (*Connection, error) { + path, err := connectionsConfigFile() + if err != nil { + return nil, err + } + conConf, err := readConnectionConf(path) + if err != nil { + return nil, err + } + defaultCon := conConf.Connection.Default + if defaultCon == "" { + defaultCon = c.Engine.ActiveService + } + if def { + if defaultCon == "" { + return nil, errors.New("no default connection found") + } + name = defaultCon + } else { + def = defaultCon == name + } + + if dst, ok := conConf.Connection.Connections[name]; ok { + return makeConnection(name, dst, def, true), nil + } + if dst, ok := c.Engine.ServiceDestinations[name]; ok { + return makeConnection(name, dst, def, false), nil + } + return nil, fmt.Errorf("connection %q not found", name) +} + +// GetAllConnections return all configured connections +func (c *Config) GetAllConnections() ([]Connection, error) { + path, err := connectionsConfigFile() + if err != nil { + return nil, err + } + conConf, err := readConnectionConf(path) + if err != nil { + return nil, err + } + + defaultCon := conConf.Connection.Default + if defaultCon == "" { + defaultCon = c.Engine.ActiveService + } + + connections := make([]Connection, 0, len(conConf.Connection.Connections)) + for name, dst := range conConf.Connection.Connections { + def := defaultCon == name + connections = append(connections, *makeConnection(name, dst, def, true)) + } + for name, dst := range c.Engine.ServiceDestinations { + if _, ok := conConf.Connection.Connections[name]; ok { + // connection name is overwritten by connections file + continue + } + def := defaultCon == name + connections = append(connections, *makeConnection(name, dst, def, false)) + } + + return connections, nil +} + +func getConnections(cons []string, dests map[string]Destination) ([]Connection, error) { + connections := make([]Connection, 0, len(cons)) + for _, name := range cons { + if dst, ok := dests[name]; ok { + connections = append(connections, *makeConnection(name, dst, false, false)) + } else { + return nil, fmt.Errorf("connection %q not found", name) + } + } + return connections, nil +} + +// GetFarmConnections return all the connections for the given farm. +func (c *Config) GetFarmConnections(name string) ([]Connection, error) { + _, cons, err := c.getFarmConnections(name, false) + return cons, err +} + +// GetDefaultFarmConnections returns the name of the default farm +// and the connections. +func (c *Config) GetDefaultFarmConnections() (string, []Connection, error) { + return c.getFarmConnections("", true) +} + +// getFarmConnections returns all connections for the given farm, +// if def is true it will use the default farm instead of the name. +// Returns the name of the farm and the connections for it. 
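The precedence just established: the connections file's Default wins over engine.ActiveService, and a name present in both sources resolves to the read-write connections-file entry. A small probe of that behavior:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	con, err := cfg.GetConnection("", true) // def=true: resolve the default
	if err != nil {
		fmt.Println(err) // "no default connection found" on a fresh system
		return
	}
	fmt.Println(con.Name, con.URI, con.ReadWrite)
}

getFarmConnections, which follows, applies the same layering to farms.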
+func (c *Config) getFarmConnections(name string, def bool) (string, []Connection, error) { + path, err := connectionsConfigFile() + if err != nil { + return "", nil, err + } + conConf, err := readConnectionConf(path) + if err != nil { + return "", nil, err + } + defaultFarm := conConf.Farm.Default + if defaultFarm == "" { + defaultFarm = c.Farms.Default + } + if def { + if defaultFarm == "" { + return "", nil, errors.New("no default farm found") + } + name = defaultFarm + } + + if cons, ok := conConf.Farm.List[name]; ok { + cons, err := getConnections(cons, conConf.Connection.Connections) + return name, cons, err + } + if cons, ok := c.Farms.List[name]; ok { + cons, err := getConnections(cons, c.Engine.ServiceDestinations) + return name, cons, err + } + return "", nil, fmt.Errorf("farm %q not found", name) +} + +func makeFarm(name string, cons []string, def, readWrite bool) Farm { + return Farm{ + Name: name, + Connections: cons, + Default: def, + ReadWrite: readWrite, + } +} + +// GetAllFarms returns all configured farms +func (c *Config) GetAllFarms() ([]Farm, error) { + path, err := connectionsConfigFile() + if err != nil { + return nil, err + } + conConf, err := readConnectionConf(path) + if err != nil { + return nil, err + } + defaultFarm := conConf.Farm.Default + if defaultFarm == "" { + defaultFarm = c.Farms.Default + } + + farms := make([]Farm, 0, len(conConf.Farm.List)) + for name, cons := range conConf.Farm.List { + def := defaultFarm == name + farms = append(farms, makeFarm(name, cons, def, true)) + } + for name, cons := range c.Farms.List { + if _, ok := conConf.Farm.List[name]; ok { + // farm name is overwritten by connections file + continue + } + def := defaultFarm == name + farms = append(farms, makeFarm(name, cons, def, false)) + } + + return farms, nil +} diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 8c532f07..6e0044f6 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -10,7 +10,8 @@ # locations in the following order: # 1. /usr/share/containers/containers.conf # 2. /etc/containers/containers.conf -# 3. $HOME/.config/containers/containers.conf (Rootless containers ONLY) +# 3. $XDG_CONFIG_HOME/containers/containers.conf or +# $HOME/.config/containers/containers.conf if $XDG_CONFIG_HOME is not set # Items specified in the latter containers.conf, if they exist, override the # previous containers.conf settings, or the default settings. @@ -164,6 +165,13 @@ default_sysctls = [ # #ipcns = "shareable" +# Default way to set an interface name inside container. Defaults to legacy +# pattern of ethX, where X is a integer, when left undefined. +# Options are: +# "device" Uses the network_interface name from the network config as interface name. +# Falls back to the ethX pattern if the network_interface is not set. +#interface_name = "" + # keyring tells the container engine whether to create # a kernel keyring for use within the container. # @@ -340,6 +348,14 @@ default_sysctls = [ # "/usr/lib/netavark", #] +# The firewall driver to be used by netavark. +# The default is empty which means netavark will pick one accordingly. Current supported +# drivers are "iptables", "none" (no firewall rules will be created) and "firewalld" (firewalld is +# experimental at the moment and not recommend outside of testing). In the future we are +# planning to add support for a "nftables" driver. 
+#firewall_driver = "" + + # The network name of the default network to attach pods to. # #default_network = "podman" @@ -368,9 +384,9 @@ default_sysctls = [ # Configure which rootless network program to use by default. Valid options are -# `slirp4netns` (default) and `pasta`. +# `slirp4netns` and `pasta` (default). # -#default_rootless_network_cmd = "slirp4netns" +#default_rootless_network_cmd = "pasta" # Path to the directory where network configuration files are located. # For the CNI backend the default is "/etc/cni/net.d" as root @@ -419,6 +435,9 @@ default_sysctls = [ # The compression format to use when pushing an image. # Valid options are: `gzip`, `zstd` and `zstd:chunked`. +# This field is ignored when pushing images to the docker-daemon and +# docker-archive formats. It is also ignored when the manifest format is set +# to v2s2. # #compression_format = "gzip" @@ -541,7 +560,7 @@ default_sysctls = [ #image_parallel_copies = 0 # Tells container engines how to handle the built-in image volumes. -# * bind: An anonymous named volume will be created and mounted +# * anonymous: An anonymous named volume will be created and mounted # into the container. # * tmpfs: The volume is mounted onto the container as a tmpfs, # which allows users to create content that disappears when @@ -620,7 +639,8 @@ default_sysctls = [ # #no_pivot_root = false -# Number of locks available for containers and pods. +# Number of locks available for containers, pods, and volumes. Each container, +# pod, and volume consumes 1 lock for as long as it exists. # If this is changed, a lock renumber must be performed (e.g. with the # 'podman system renumber' command). # @@ -639,6 +659,16 @@ default_sysctls = [ # #remote = false +# Number of times to retry pulling/pushing images in case of failure +# +#retry = 3 + +# Delay between retries in case pulling/pushing image fails. +# If set, container engines will retry at the set interval, +# otherwise they delay 2 seconds and then exponentially back off. +# +#retry_delay = "2s" + # Default OCI runtime # #runtime = "crun" @@ -729,6 +759,15 @@ default_sysctls = [ # "/run/current-system/sw/bin/crun", #] +#crun-vm = [ +# "/usr/bin/crun-vm", +# "/usr/local/bin/crun-vm", +# "/usr/local/sbin/crun-vm", +# "/sbin/crun-vm", +# "/bin/crun-vm", +# "/run/current-system/sw/bin/crun-vm", +#] + #kata = [ # "/usr/bin/kata-runtime", # "/usr/sbin/kata-runtime", @@ -784,16 +823,15 @@ default_sysctls = [ # #disk_size=10 -# Default image URI when creating a new VM using `podman machine init`. -# Options: On Linux/Mac, `testing`, `stable`, `next`. On Windows, the major -# version of the OS (e.g `36`) for Fedora 36. For all platforms you can -# alternatively specify a custom download URL to an image. Container engines -# translate URIs $OS and $ARCH to the native OS and ARCH. URI -# "https://example.com/$OS/$ARCH/foobar.ami" becomes +# Default Image used when creating a new VM using `podman machine init`. +# Can be specified as registry with a bootable OCI artifact, download URL, or a local path. +# Registry target must be in the form of `docker://registry/repo/image:version`. +# Container engines translate URIs $OS and $ARCH to the native OS and ARCH. +# URI "https://example.com/$OS/$ARCH/foobar.ami" would become # "https://example.com/linux/amd64/foobar.ami" on a Linux AMD machine. -# The default value is `testing`. +# If unspecified, the default Podman machine image will be used. # -#image = "testing" +#image = "" # Memory in MB a machine is created with. 
# @@ -818,6 +856,11 @@ default_sysctls = [ # #provider = "" +# Rosetta supports running x86_64 Linux binaries on a Podman machine on Apple silicon. +# The default value is `true`. Supported on AppleHV(arm64) machines only. +# +#rosetta=true + # The [machine] table MUST be the last entry in this file. # (Unless another table is added) # TOML does not provide a way to end a table other than a further table being diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index ea2ede1c..b08f1659 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -13,7 +13,6 @@ import ( nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/cgroupv2" - "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" @@ -30,7 +29,7 @@ const ( _defaultTransport = "docker://" // _defaultImageVolumeMode is a mode to handle built-in image volumes. - _defaultImageVolumeMode = _typeBind + _defaultImageVolumeMode = "anonymous" // defaultInitName is the default name of the init binary defaultInitName = "catatonit" @@ -196,7 +195,9 @@ func defaultConfig() (*Config, error) { } defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath - if useUserConfigLocations() { + // NOTE: For now we want Windows to use system locations. + // GetRootlessUID == -1 on Windows, so exclude negative range + if unshare.GetRootlessUID() > 0 { configHome, err := homedir.GetConfigHome() if err != nil { return nil, err @@ -252,10 +253,11 @@ func defaultConfig() (*Config, error) { Volumes: attributedstring.Slice{}, }, Network: NetworkConfig{ + FirewallDriver: "", DefaultNetwork: "podman", DefaultSubnet: DefaultSubnet, DefaultSubnetPools: DefaultSubnetPools, - DefaultRootlessNetworkCmd: "slirp4netns", + DefaultRootlessNetworkCmd: "pasta", DNSBindPort: 0, CNIPluginDirs: attributedstring.NewSlice(DefaultCNIPluginDirs), NetavarkPluginDirs: attributedstring.NewSlice(DefaultNetavarkPluginDirs), @@ -284,18 +286,21 @@ func defaultMachineConfig() MachineConfig { return MachineConfig{ CPUs: uint64(cpus), DiskSize: 100, - Image: getDefaultMachineImage(), - Memory: 2048, - User: getDefaultMachineUser(), - Volumes: attributedstring.NewSlice(getDefaultMachineVolumes()), + // TODO: Set machine image default here + // Currently the default is set in Podman as we need time to stabilize + // VM images and locations between different providers. + Image: "", + Memory: 2048, + User: getDefaultMachineUser(), + Volumes: attributedstring.NewSlice(getDefaultMachineVolumes()), + Rosetta: true, } } // defaultFarmConfig returns the default farms configuration. func defaultFarmConfig() FarmConfig { - emptyList := make(map[string][]string) return FarmConfig{ - List: emptyList, + List: map[string][]string{}, } } @@ -320,7 +325,7 @@ func defaultEngineConfig() (*EngineConfig, error) { return nil, err } } - storeOpts, err := types.DefaultStoreOptions(useUserConfigLocations(), unshare.GetRootlessUID()) + storeOpts, err := types.DefaultStoreOptions() if err != nil { return nil, err } @@ -338,7 +343,7 @@ func defaultEngineConfig() (*EngineConfig, error) { c.HelperBinariesDir.Set(defaultHelperBinariesDir) if additionalHelperBinariesDir != "" { - // Prioritize addtionalHelperBinariesDir over defaults. 
+ // Prioritize additionalHelperBinariesDir over defaults. c.HelperBinariesDir.Set(append([]string{additionalHelperBinariesDir}, c.HelperBinariesDir.Get()...)) } c.HooksDir.Set(DefaultHooksDirs) @@ -353,6 +358,7 @@ func defaultEngineConfig() (*EngineConfig, error) { c.PodmanshTimeout = uint(30) c.ExitCommandDelay = uint(5 * 60) c.Remote = isRemote() + c.Retry = 3 c.OCIRuntimes = map[string][]string{ "crun": { "/usr/bin/crun", @@ -363,6 +369,14 @@ func defaultEngineConfig() (*EngineConfig, error) { "/bin/crun", "/run/current-system/sw/bin/crun", }, + "crun-vm": { + "/usr/bin/crun-vm", + "/usr/local/bin/crun-vm", + "/usr/local/sbin/crun-vm", + "/sbin/crun-vm", + "/bin/crun-vm", + "/run/current-system/sw/bin/crun-vm", + }, "crun-wasm": { "/usr/bin/crun-wasm", "/usr/sbin/crun-wasm", @@ -470,7 +484,6 @@ func defaultEngineConfig() (*EngineConfig, error) { // TODO - ideally we should expose a `type LockType string` along with // constants. c.LockType = getDefaultLockType() - c.MachineEnabled = false c.ChownCopiedFiles = true c.PodExitPolicy = defaultPodExitPolicy @@ -481,11 +494,14 @@ func defaultEngineConfig() (*EngineConfig, error) { } func defaultTmpDir() (string, error) { - if !useUserConfigLocations() { + // NOTE: For now we want Windows to use system locations. + // GetRootlessUID == -1 on Windows, so exclude negative range + rootless := unshare.GetRootlessUID() > 0 + if !rootless { return getLibpodTmpDir(), nil } - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", err } @@ -515,13 +531,13 @@ func (c EngineConfig) EventsLogMaxSize() uint64 { func (c *Config) SecurityOptions() []string { securityOpts := []string{} if c.Containers.SeccompProfile != "" && c.Containers.SeccompProfile != SeccompDefaultPath { - securityOpts = append(securityOpts, fmt.Sprintf("seccomp=%s", c.Containers.SeccompProfile)) + securityOpts = append(securityOpts, "seccomp="+c.Containers.SeccompProfile) } if apparmor.IsEnabled() && c.Containers.ApparmorProfile != "" { - securityOpts = append(securityOpts, fmt.Sprintf("apparmor=%s", c.Containers.ApparmorProfile)) + securityOpts = append(securityOpts, "apparmor="+c.Containers.ApparmorProfile) } if selinux.GetEnabled() && !c.Containers.EnableLabeling { - securityOpts = append(securityOpts, fmt.Sprintf("label=%s", selinux.DisableSecOpt()[0])) + securityOpts = append(securityOpts, "label="+selinux.DisableSecOpt()[0]) } return securityOpts } @@ -551,7 +567,7 @@ func (c *Config) DNSServers() []string { return c.Containers.DNSServers.Get() } -// DNSSerches returns the default DNS searches to add to resolv.conf in containers. +// DNSSearches returns the default DNS searches to add to resolv.conf in containers. func (c *Config) DNSSearches() []string { return c.Containers.DNSSearches.Get() } @@ -636,11 +652,6 @@ func (c *Config) LogDriver() string { return c.Containers.LogDriver } -// MachineEnabled returns if podman is running inside a VM or not. -func (c *Config) MachineEnabled() bool { - return c.Engine.MachineEnabled -} - // MachineVolumes returns volumes to mount into the VM. func (c *Config) MachineVolumes() ([]string, error) { return machineVolumes(c.Machine.Volumes.Get()) @@ -670,18 +681,6 @@ func getDefaultSSHConfig() string { return filepath.Join(dirname, ".ssh", "config") } -func useUserConfigLocations() bool { - // NOTE: For now we want Windows to use system locations. 
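defaultTmpDir above now resolves the rootless runtime directory through the containers/storage helper rather than the removed util.GetRuntimeDir; the helper honors $XDG_RUNTIME_DIR with per-UID fallbacks. A minimal check:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/homedir"
)

func main() {
	dir, err := homedir.GetRuntimeDir()
	fmt.Println(dir, err) // e.g. /run/user/1000 when XDG_RUNTIME_DIR is set
}

The deleted useUserConfigLocations helper continues below; its two call sites now inline the rootless-UID check.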
- // GetRootlessUID == -1 on Windows, so exclude negative range - return unshare.GetRootlessUID() > 0 -} - -// getDefaultImage returns the default machine image stream -// On Windows this refers to the Fedora major release number -func getDefaultMachineImage() string { - return "testing" -} - // getDefaultMachineUser returns the user to use for rootless podman // This is only for the apple, hyperv, and qemu implementations. // WSL's user will be hardcoded in podman to "user" diff --git a/vendor/github.com/containers/common/pkg/config/default_common.go b/vendor/github.com/containers/common/pkg/config/default_common.go index f6546104..2caa3f01 100644 --- a/vendor/github.com/containers/common/pkg/config/default_common.go +++ b/vendor/github.com/containers/common/pkg/config/default_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package config diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go index 0d427a05..46653e39 100644 --- a/vendor/github.com/containers/common/pkg/config/default_unsupported.go +++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !windows -// +build !linux,!windows package config diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go index 4f1362bd..70627cbd 100644 --- a/vendor/github.com/containers/common/pkg/config/default_windows.go +++ b/vendor/github.com/containers/common/pkg/config/default_windows.go @@ -1,6 +1,13 @@ package config -import "os" +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/homedir" +) // isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { @@ -36,7 +43,10 @@ func getLibpodTmpDir() string { // getDefaultMachineVolumes returns default mounted volumes (possibly with env vars, which will be expanded) func getDefaultMachineVolumes() []string { - return []string{} + hd := homedir.Get() + vol := filepath.VolumeName(hd) + hostMnt := filepath.ToSlash(strings.TrimPrefix(hd, vol)) + return []string{fmt.Sprintf("%s:%s", hd, hostMnt)} } func getDefaultComposeProviders() []string { diff --git a/vendor/github.com/containers/common/pkg/config/new.go b/vendor/github.com/containers/common/pkg/config/new.go index 64ddf471..fb59473f 100644 --- a/vendor/github.com/containers/common/pkg/config/new.go +++ b/vendor/github.com/containers/common/pkg/config/new.go @@ -21,7 +21,6 @@ var ( ) const ( - // FIXME: update code base and tests to use the two constants below. containersConfEnv = "CONTAINERS_CONF" containersConfOverrideEnv = containersConfEnv + "_OVERRIDE" ) @@ -79,11 +78,12 @@ func newLocked(options *Options) (*Config, error) { if err != nil { return nil, fmt.Errorf("finding config on system: %w", err) } + for _, path := range configs { // Merge changes in later configs with the previous configs. // Each config file that specified fields, will override the // previous fields. 
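getDefaultMachineVolumes on Windows now mounts the host home directory into the VM at a drive-letter-stripped, forward-slash path. A sketch of that conversion with a hypothetical home directory (filepath.VolumeName only splits off a drive letter when built for Windows):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	hd := `C:\Users\me` // hypothetical; the real code uses homedir.Get()
	vol := filepath.VolumeName(hd)
	hostMnt := filepath.ToSlash(strings.TrimPrefix(hd, vol))
	fmt.Printf("%s:%s\n", hd, hostMnt) // C:\Users\me:/Users/me on Windows
}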
- if err = readConfigFromFile(path, config); err != nil { + if err = readConfigFromFile(path, config, true); err != nil { return nil, fmt.Errorf("reading system config %q: %w", path, err) } logrus.Debugf("Merged system config %q", path) @@ -115,7 +115,7 @@ func newLocked(options *Options) (*Config, error) { } // readConfigFromFile reads in container config in the specified // file and then merge changes with the current default. - if err := readConfigFromFile(add, config); err != nil { + if err := readConfigFromFile(add, config, false); err != nil { return nil, fmt.Errorf("reading additional config %q: %w", add, err) } logrus.Debugf("Merged additional config %q", add) @@ -157,12 +157,8 @@ func systemConfigs() (configs []string, finalErr error) { } return append(configs, path), nil } - if _, err := os.Stat(DefaultContainersConfig); err == nil { - configs = append(configs, DefaultContainersConfig) - } - if _, err := os.Stat(OverrideContainersConfig); err == nil { - configs = append(configs, OverrideContainersConfig) - } + configs = append(configs, DefaultContainersConfig) + configs = append(configs, OverrideContainersConfig) var err error configs, err = addConfigs(OverrideContainersConfig+".d", configs) @@ -170,18 +166,14 @@ func systemConfigs() (configs []string, finalErr error) { return nil, err } - path, err := ifRootlessConfigPath() + path, err := userConfigPath() if err != nil { return nil, err } - if path != "" { - if _, err := os.Stat(path); err == nil { - configs = append(configs, path) - } - configs, err = addConfigs(path+".d", configs) - if err != nil { - return nil, err - } + configs = append(configs, path) + configs, err = addConfigs(path+".d", configs) + if err != nil { + return nil, err } return configs, nil } @@ -225,10 +217,13 @@ func addConfigs(dirPath string, configs []string) ([]string, error) { // unmarshal its content into a Config. The config param specifies the previous // default config. If the path, only specifies a few fields in the Toml file // the defaults from the config parameter will be used for all other fields. 
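
The two hunks above change how configs are discovered and read: systemConfigs now lists the well-known paths unconditionally, and readConfigFromFile tolerates a missing file only when told to, which removes the old stat-then-read race. A minimal sketch of that load pattern, using BurntSushi/toml as the code above does; loadLayered and engineConf are hypothetical names, not the library's API:

package main

import (
	"errors"
	"fmt"
	"io/fs"

	"github.com/BurntSushi/toml"
)

type engineConf struct {
	Retry         uint   `toml:"retry"`
	ActiveService string `toml:"active_service"`
}

// loadLayered decodes each path into the same struct; later files override
// only the fields they set, and missing files are skipped when tolerated.
func loadLayered(paths []string, ignoreMissing bool) (*engineConf, error) {
	cfg := &engineConf{Retry: 3} // compiled-in default, as in defaultEngineConfig
	for _, p := range paths {
		if _, err := toml.DecodeFile(p, cfg); err != nil {
			if ignoreMissing && errors.Is(err, fs.ErrNotExist) {
				continue // absence is detected on open, not with a racy prior Stat
			}
			return nil, fmt.Errorf("decode configuration %v: %w", p, err)
		}
	}
	return cfg, nil
}

func main() {
	cfg, err := loadLayered([]string{
		"/usr/share/containers/containers.conf",
		"/etc/containers/containers.conf",
	}, true)
	fmt.Println(cfg, err)
}

A file deleted between discovery and decode no longer aborts the merge; only user-supplied paths still fail hard on absence.
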
-func readConfigFromFile(path string, config *Config) error { +func readConfigFromFile(path string, config *Config, ignoreErrNotExist bool) error { logrus.Tracef("Reading configuration file %q", path) meta, err := toml.DecodeFile(path, config) if err != nil { + if ignoreErrNotExist && errors.Is(err, fs.ErrNotExist) { + return nil + } return fmt.Errorf("decode configuration %v: %w", path, err) } keys := meta.Undecoded() diff --git a/vendor/github.com/containers/common/pkg/config/nosystemd.go b/vendor/github.com/containers/common/pkg/config/nosystemd.go index 352fddf9..8832d842 100644 --- a/vendor/github.com/containers/common/pkg/config/nosystemd.go +++ b/vendor/github.com/containers/common/pkg/config/nosystemd.go @@ -1,5 +1,4 @@ //go:build !systemd || !cgo -// +build !systemd !cgo package config diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go index 3cd6ff84..50637461 100644 --- a/vendor/github.com/containers/common/pkg/config/systemd.go +++ b/vendor/github.com/containers/common/pkg/config/systemd.go @@ -1,5 +1,4 @@ //go:build systemd && cgo -// +build systemd,cgo package config diff --git a/vendor/github.com/containers/common/pkg/filters/filters.go b/vendor/github.com/containers/common/pkg/filters/filters.go index 3d442a53..25f52297 100644 --- a/vendor/github.com/containers/common/pkg/filters/filters.go +++ b/vendor/github.com/containers/common/pkg/filters/filters.go @@ -2,6 +2,7 @@ package filters import ( "encoding/json" + "errors" "fmt" "net/http" "path/filepath" @@ -17,7 +18,7 @@ import ( func ComputeUntilTimestamp(filterValues []string) (time.Time, error) { invalid := time.Time{} if len(filterValues) != 1 { - return invalid, fmt.Errorf("specify exactly one timestamp for until") + return invalid, errors.New("specify exactly one timestamp for until") } ts, err := timetype.GetTimestamp(filterValues[0], time.Now()) if err != nil { @@ -76,13 +77,10 @@ func FiltersFromRequest(r *http.Request) ([]string, error) { libpodFilters := make([]string, 0, len(filters)) for filterKey, filterSlice := range filters { - f := filterKey for _, filterValue := range filterSlice { - f += "=" + filterValue + libpodFilters = append(libpodFilters, fmt.Sprintf("%s=%s", filterKey, filterValue)) } - libpodFilters = append(libpodFilters, f) } - return libpodFilters, nil } diff --git a/vendor/github.com/containers/common/pkg/hooks/hooks.go b/vendor/github.com/containers/common/pkg/hooks/hooks.go index 2758d122..0b4ad052 100644 --- a/vendor/github.com/containers/common/pkg/hooks/hooks.go +++ b/vendor/github.com/containers/common/pkg/hooks/hooks.go @@ -122,10 +122,8 @@ func (m *Manager) Hooks(config *rspec.Spec, annotations map[string]string, hasBi switch stage { case "createContainer": config.Hooks.CreateContainer = append(config.Hooks.CreateContainer, namedHook.hook.Hook) - case "createRuntime": + case "createRuntime", "prestart": config.Hooks.CreateRuntime = append(config.Hooks.CreateRuntime, namedHook.hook.Hook) - case "prestart": - config.Hooks.Prestart = append(config.Hooks.Prestart, namedHook.hook.Hook) case "poststart": config.Hooks.Poststart = append(config.Hooks.Poststart, namedHook.hook.Hook) case "poststop": diff --git a/vendor/github.com/containers/common/pkg/machine/machine.go b/vendor/github.com/containers/common/pkg/machine/machine.go index 36428e58..57797a44 100644 --- a/vendor/github.com/containers/common/pkg/machine/machine.go +++ b/vendor/github.com/containers/common/pkg/machine/machine.go @@ -4,15 +4,9 @@ import ( 
"os" "strings" "sync" - - "github.com/containers/common/pkg/config" - "github.com/sirupsen/logrus" ) -// TODO: change name to MachineMarker since package is already called machine -// -//nolint:revive -type MachineMarker struct { +type Marker struct { Enabled bool Type string } @@ -21,56 +15,42 @@ const ( markerFile = "/etc/containers/podman-machine" Wsl = "wsl" Qemu = "qemu" + AppleHV = "applehv" + HyperV = "hyperv" ) var ( - markerSync sync.Once - machineMarker *MachineMarker + markerSync sync.Once + marker *Marker ) func loadMachineMarker(file string) { var kind string - - // Support deprecated config value for compatibility - enabled := isLegacyConfigSet() + enabled := false if content, err := os.ReadFile(file); err == nil { enabled = true kind = strings.TrimSpace(string(content)) } - machineMarker = &MachineMarker{enabled, kind} -} - -func isLegacyConfigSet() bool { - config, err := config.Default() - if err != nil { - logrus.Warnf("could not obtain container configuration") - return false - } - - //nolint:staticcheck //lint:ignore SA1019 deprecated call - return config.Engine.MachineEnabled + marker = &Marker{enabled, kind} } func IsPodmanMachine() bool { return GetMachineMarker().Enabled } -// TODO: change name to HostType since package is already called machine -// -//nolint:revive -func MachineHostType() string { +func HostType() string { return GetMachineMarker().Type } func IsGvProxyBased() bool { - return IsPodmanMachine() && MachineHostType() != Wsl + return IsPodmanMachine() && HostType() != Wsl } -func GetMachineMarker() *MachineMarker { +func GetMachineMarker() *Marker { markerSync.Do(func() { loadMachineMarker(markerFile) }) - return machineMarker + return marker } diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index 8296faa8..30f099a0 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -6,10 +6,12 @@ import ( "fmt" "os" + "github.com/containers/common/internal" "github.com/containers/image/v5/manifest" digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" ) // List is a generic interface for manipulating a manifest list or an image @@ -19,6 +21,7 @@ type List interface { Remove(instanceDigest digest.Digest) error SetURLs(instanceDigest digest.Digest, urls []string) error URLs(instanceDigest digest.Digest) ([]string, error) + ClearAnnotations(instanceDigest *digest.Digest) error SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error Annotations(instanceDigest *digest.Digest) (map[string]string, error) SetOS(instanceDigest digest.Digest, os string) error @@ -33,6 +36,12 @@ type List interface { Features(instanceDigest digest.Digest) ([]string, error) SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error OSFeatures(instanceDigest digest.Digest) ([]string, error) + SetMediaType(instanceDigest digest.Digest, mediaType string) error + MediaType(instanceDigest digest.Digest) (string, error) + SetArtifactType(instanceDigest *digest.Digest, artifactType string) error + ArtifactType(instanceDigest *digest.Digest) (string, error) + SetSubject(subject *v1.Descriptor) error + Subject() (*v1.Descriptor, error) Serialize(mimeType string) ([]byte, error) Instances() []digest.Digest OCIv1() *v1.Index @@ -96,18 +105,21 @@ func (l 
*list) AddInstance(manifestDigest digest.Digest, manifestSize int64, man Platform: schema2platform, }) - ociv1platform := v1.Platform{ + ociv1platform := &v1.Platform{ Architecture: architecture, OS: osName, OSVersion: osVersion, OSFeatures: osFeatures, Variant: variant, } + if ociv1platform.Architecture == "" && ociv1platform.OS == "" && ociv1platform.OSVersion == "" && ociv1platform.Variant == "" && len(ociv1platform.OSFeatures) == 0 { + ociv1platform = nil + } l.oci.Manifests = append(l.oci.Manifests, v1.Descriptor{ MediaType: manifestType, Size: manifestSize, Digest: manifestDigest, - Platform: &ociv1platform, + Platform: ociv1platform, }) return nil @@ -166,7 +178,13 @@ func (l *list) SetURLs(instanceDigest digest.Digest, urls []string) error { return err } oci.URLs = append([]string{}, urls...) + if len(oci.URLs) == 0 { + oci.URLs = nil + } docker.URLs = append([]string{}, urls...) + if len(docker.URLs) == 0 { + docker.URLs = nil + } return nil } @@ -179,7 +197,24 @@ func (l *list) URLs(instanceDigest digest.Digest) ([]string, error) { return append([]string{}, oci.URLs...), nil } -// SetAnnotations sets annotations on the image index, or on a specific manifest. +// ClearAnnotations removes all annotations from the image index, or from a +// specific manifest. +// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) ClearAnnotations(instanceDigest *digest.Digest) error { + a := &l.oci.Annotations + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return err + } + a = &oci.Annotations + } + *a = nil + return nil +} + +// SetAnnotations sets annotations on the image index, or on a specific +// manifest. // The field is specific to the OCI image index format, and is not present in Docker manifest lists. func (l *list) SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error { a := &l.oci.Annotations @@ -190,10 +225,15 @@ func (l *list) SetAnnotations(instanceDigest *digest.Digest, annotations map[str } a = &oci.Annotations } - (*a) = make(map[string]string) + if *a == nil { + (*a) = make(map[string]string) + } for k, v := range annotations { (*a)[k] = v } + if len(*a) == 0 { + *a = nil + } return nil } @@ -226,7 +266,13 @@ func (l *list) SetOS(instanceDigest digest.Digest, os string) error { return err } docker.Platform.OS = os + if oci.Platform == nil { + oci.Platform = &v1.Platform{} + } oci.Platform.OS = os + if oci.Platform.Architecture == "" && oci.Platform.OS == "" && oci.Platform.OSVersion == "" && oci.Platform.Variant == "" && len(oci.Platform.OSFeatures) == 0 { + oci.Platform = nil + } return nil } @@ -236,7 +282,11 @@ func (l *list) OS(instanceDigest digest.Digest) (string, error) { if err != nil { return "", err } - return oci.Platform.OS, nil + platform := oci.Platform + if platform == nil { + platform = &v1.Platform{} + } + return platform.OS, nil } // SetArchitecture sets the Architecture field in the platform information associated with the instance with the specified digest. 
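
SetAnnotations above now merges into any existing map instead of replacing it, and collapses an empty result back to nil so the field is omitted when the index is serialized. The same merge-and-normalize rule as a standalone helper (mergeAnnotations is a hypothetical name):

// mergeAnnotations merges src into *dst; nil means "absent" in the
// marshalled JSON, so an empty result collapses back to nil.
func mergeAnnotations(dst *map[string]string, src map[string]string) {
	if *dst == nil {
		*dst = make(map[string]string, len(src))
	}
	for k, v := range src {
		(*dst)[k] = v
	}
	if len(*dst) == 0 {
		*dst = nil
	}
}
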
@@ -250,7 +300,13 @@ func (l *list) SetArchitecture(instanceDigest digest.Digest, arch string) error return err } docker.Platform.Architecture = arch + if oci.Platform == nil { + oci.Platform = &v1.Platform{} + } oci.Platform.Architecture = arch + if oci.Platform.Architecture == "" && oci.Platform.OS == "" && oci.Platform.OSVersion == "" && oci.Platform.Variant == "" && len(oci.Platform.OSFeatures) == 0 { + oci.Platform = nil + } return nil } @@ -260,7 +316,11 @@ func (l *list) Architecture(instanceDigest digest.Digest) (string, error) { if err != nil { return "", err } - return oci.Platform.Architecture, nil + platform := oci.Platform + if platform == nil { + platform = &v1.Platform{} + } + return platform.Architecture, nil } // SetOSVersion sets the OSVersion field in the platform information associated with the instance with the specified digest. @@ -274,7 +334,13 @@ func (l *list) SetOSVersion(instanceDigest digest.Digest, osVersion string) erro return err } docker.Platform.OSVersion = osVersion + if oci.Platform == nil { + oci.Platform = &v1.Platform{} + } oci.Platform.OSVersion = osVersion + if oci.Platform.Architecture == "" && oci.Platform.OS == "" && oci.Platform.OSVersion == "" && oci.Platform.Variant == "" && len(oci.Platform.OSFeatures) == 0 { + oci.Platform = nil + } return nil } @@ -284,7 +350,11 @@ func (l *list) OSVersion(instanceDigest digest.Digest) (string, error) { if err != nil { return "", err } - return oci.Platform.OSVersion, nil + platform := oci.Platform + if platform == nil { + platform = &v1.Platform{} + } + return platform.OSVersion, nil } // SetVariant sets the Variant field in the platform information associated with the instance with the specified digest. @@ -298,7 +368,13 @@ func (l *list) SetVariant(instanceDigest digest.Digest, variant string) error { return err } docker.Platform.Variant = variant + if oci.Platform == nil { + oci.Platform = &v1.Platform{} + } oci.Platform.Variant = variant + if oci.Platform.Architecture == "" && oci.Platform.OS == "" && oci.Platform.OSVersion == "" && oci.Platform.Variant == "" && len(oci.Platform.OSFeatures) == 0 { + oci.Platform = nil + } return nil } @@ -308,7 +384,11 @@ func (l *list) Variant(instanceDigest digest.Digest) (string, error) { if err != nil { return "", err } - return oci.Platform.Variant, nil + platform := oci.Platform + if platform == nil { + platform = &v1.Platform{} + } + return platform.Variant, nil } // SetFeatures sets the features list in the platform information associated with the instance with the specified digest. @@ -319,6 +399,9 @@ func (l *list) SetFeatures(instanceDigest digest.Digest, features []string) erro return err } docker.Platform.Features = append([]string{}, features...) + if len(docker.Platform.Features) == 0 { + docker.Platform.Features = nil + } // no OCI equivalent return nil } @@ -344,7 +427,16 @@ func (l *list) SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) return err } docker.Platform.OSFeatures = append([]string{}, osFeatures...) + if oci.Platform == nil { + oci.Platform = &v1.Platform{} + } oci.Platform.OSFeatures = append([]string{}, osFeatures...) 
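
Each platform setter in this stretch (SetOS above, plus SetArchitecture, SetOSVersion, SetVariant, and SetOSFeatures) repeats one invariant: the OCI descriptor's Platform pointer is non-nil exactly when at least one field is set. A helper that states the rule once; normalizePlatform is hypothetical, not part of the package:

import v1 "github.com/opencontainers/image-spec/specs-go/v1"

// normalizePlatform collapses an all-empty platform back to nil so empty
// structs never appear in serialized descriptors.
func normalizePlatform(p *v1.Platform) *v1.Platform {
	if p == nil || (p.Architecture == "" && p.OS == "" && p.OSVersion == "" &&
		p.Variant == "" && len(p.OSFeatures) == 0) {
		return nil
	}
	return p
}
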
+ if len(oci.Platform.OSFeatures) == 0 { + oci.Platform.OSFeatures = nil + } + if oci.Platform.Architecture == "" && oci.Platform.OS == "" && oci.Platform.OSVersion == "" && oci.Platform.Variant == "" && len(oci.Platform.OSFeatures) == 0 { + oci.Platform = nil + } return nil } @@ -354,7 +446,77 @@ func (l *list) OSFeatures(instanceDigest digest.Digest) ([]string, error) { if err != nil { return nil, err } - return append([]string{}, oci.Platform.OSFeatures...), nil + platform := oci.Platform + if platform == nil { + platform = &v1.Platform{} + } + return append([]string{}, platform.OSFeatures...), nil +} + +// SetMediaType sets the MediaType field in the instance with the specified digest. +func (l *list) SetMediaType(instanceDigest digest.Digest, mediaType string) error { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + oci.MediaType = mediaType + return nil +} + +// MediaType retrieves the MediaType field in the instance with the specified digest. +func (l *list) MediaType(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.MediaType, nil +} + +// SetArtifactType sets the ArtifactType field in the instance with the specified digest. +func (l *list) SetArtifactType(instanceDigest *digest.Digest, artifactType string) error { + artifactTypePtr := &l.oci.ArtifactType + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return err + } + artifactTypePtr = &oci.ArtifactType + } + *artifactTypePtr = artifactType + return nil +} + +// ArtifactType retrieves the ArtifactType field in the instance with the specified digest. +func (l *list) ArtifactType(instanceDigest *digest.Digest) (string, error) { + artifactTypePtr := &l.oci.ArtifactType + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return "", err + } + artifactTypePtr = &oci.ArtifactType + } + return *artifactTypePtr, nil +} + +// SetSubject sets the image index's subject. +// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) SetSubject(subject *v1.Descriptor) error { + if subject != nil { + subject = internal.DeepCopyDescriptor(subject) + } + l.oci.Subject = subject + return nil +} + +// Subject retrieves the subject which might have been set on the image index. +// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) Subject() (*v1.Descriptor, error) { + s := l.oci.Subject + if s != nil { + s = internal.DeepCopyDescriptor(s) + } + return s, nil } // FromBlob builds a list from an encoded manifest list or image index. @@ -400,11 +562,19 @@ func FromBlob(manifestBytes []byte) (List, error) { if platform == nil { platform = &v1.Platform{} } + if m.Platform != nil && m.Platform.OSFeatures != nil { + platform.OSFeatures = slices.Clone(m.Platform.OSFeatures) + } + var urls []string + if m.URLs != nil { + urls = slices.Clone(m.URLs) + } list.docker.Manifests = append(list.docker.Manifests, manifest.Schema2ManifestDescriptor{ Schema2Descriptor: manifest.Schema2Descriptor{ MediaType: m.MediaType, Size: m.Size, Digest: m.Digest, + URLs: urls, }, Platform: manifest.Schema2PlatformSpec{ Architecture: platform.Architecture, @@ -421,13 +591,25 @@ func FromBlob(manifestBytes []byte) (List, error) { func (l *list) preferOCI() bool { // If we have any data that's only in the OCI format, use that. 
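
SetSubject and Subject both route through the module-internal DeepCopyDescriptor so callers never hold a pointer into the list's own index. A sketch of what such a copy has to cover, assuming the image-spec v1.1 Descriptor fields; copyDescriptor is a hypothetical stand-in for the internal helper:

import v1 "github.com/opencontainers/image-spec/specs-go/v1"

func copyDescriptor(d *v1.Descriptor) *v1.Descriptor {
	if d == nil {
		return nil
	}
	c := *d // value fields: MediaType, Digest, Size, ArtifactType
	c.URLs = append([]string(nil), d.URLs...)
	c.Data = append([]byte(nil), d.Data...)
	if d.Annotations != nil {
		c.Annotations = make(map[string]string, len(d.Annotations))
		for k, v := range d.Annotations {
			c.Annotations[k] = v
		}
	}
	if d.Platform != nil {
		p := *d.Platform
		p.OSFeatures = append([]string(nil), d.Platform.OSFeatures...)
		c.Platform = &p
	}
	return &c
}
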
+ if l.oci.ArtifactType != "" { + return true + } + if l.oci.Subject != nil { + return true + } + if len(l.oci.Annotations) > 0 { + return true + } for _, m := range l.oci.Manifests { - if len(m.URLs) > 0 { + if m.ArtifactType != "" { return true } if len(m.Annotations) > 0 { return true } + if len(m.Data) > 0 { + return true + } } // If we have any data that's only in the Docker format, use that. for _, m := range l.docker.Manifests { diff --git a/vendor/github.com/containers/common/pkg/netns/netns_linux.go b/vendor/github.com/containers/common/pkg/netns/netns_linux.go index f2569d37..59366532 100644 --- a/vendor/github.com/containers/common/pkg/netns/netns_linux.go +++ b/vendor/github.com/containers/common/pkg/netns/netns_linux.go @@ -30,17 +30,19 @@ import ( "sync" "github.com/containernetworking/plugins/pkg/ns" - "github.com/containers/common/pkg/util" + "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) +// threadNsPath is the /proc path to the current netns handle for the current thread +const threadNsPath = "/proc/thread-self/ns/net" + // GetNSRunDir returns the dir of where to create the netNS. When running // rootless, it needs to be at a location writable by user. func GetNSRunDir() (string, error) { if unshare.IsRootless() { - rootlessDir, err := util.GetRuntimeDir() + rootlessDir, err := homedir.GetRuntimeDir() if err != nil { return "", err } @@ -49,6 +51,10 @@ func GetNSRunDir() (string, error) { return "/run/netns", nil } +func NewNSAtPath(nsPath string) (ns.NetNS, error) { + return newNSPath(nsPath) +} + // NewNS creates a new persistent (bind-mounted) network namespace and returns // an object representing that namespace, without switching to it. func NewNS() (ns.NetNS, error) { @@ -111,8 +117,12 @@ func NewNSWithName(name string) (ns.NetNS, error) { } } - // create an empty file at the mount point nsPath := path.Join(nsRunDir, name) + return newNSPath(nsPath) +} + +func newNSPath(nsPath string) (ns.NetNS, error) { + // create an empty file at the mount point mountPointFd, err := os.OpenFile(nsPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) if err != nil { return nil, err @@ -140,24 +150,10 @@ func NewNSWithName(name string) (ns.NetNS, error) { // Don't unlock. By not unlocking, golang will kill the OS thread when the // goroutine is done (for go1.10+) - threadNsPath := getCurrentThreadNetNSPath() - - var origNS ns.NetNS - origNS, err = ns.GetNS(threadNsPath) - if err != nil { - logrus.Warnf("Cannot open current network namespace %s: %q", threadNsPath, err) - return - } - defer func() { - if err := origNS.Close(); err != nil { - logrus.Errorf("Unable to close namespace: %q", err) - } - }() - // create a new netns on the current thread err = unix.Unshare(unix.CLONE_NEWNET) if err != nil { - logrus.Warnf("Cannot create a new network namespace: %q", err) + err = fmt.Errorf("unshare network namespace: %w", err) return } @@ -181,29 +177,26 @@ func NewNSWithName(name string) (ns.NetNS, error) { // UnmountNS unmounts the given netns path func UnmountNS(nsPath string) error { - nsRunDir, err := GetNSRunDir() - if err != nil { - return err - } - + var rErr error // Only unmount if it's been bind-mounted (don't touch namespaces in /proc...) 
- if strings.HasPrefix(nsPath, nsRunDir) { + if !strings.HasPrefix(nsPath, "/proc/") { if err := unix.Unmount(nsPath, unix.MNT_DETACH); err != nil { - return fmt.Errorf("failed to unmount NS: at %s: %v", nsPath, err) + // Do not return here, always try to remove below. + // This is important in case podman now is in a new userns compared to + // when the netns was created. The umount will fail EINVAL but removing + // the file will work and the kernel will destroy the bind mount in the + // other ns because of this. We also need it so pasta doesn't leak. + rErr = fmt.Errorf("failed to unmount NS: at %s: %w", nsPath, err) } if err := os.Remove(nsPath); err != nil { - return fmt.Errorf("failed to remove ns path %s: %v", nsPath, err) + err := fmt.Errorf("failed to remove ns path: %w", err) + if rErr != nil { + err = fmt.Errorf("%v, %w", err, rErr) + } + rErr = err } } - return nil -} - -// getCurrentThreadNetNSPath copied from pkg/ns -func getCurrentThreadNetNSPath() string { - // /proc/self/ns/net returns the namespace of the main thread, not - // of whatever thread this goroutine is running on. Make sure we - // use the thread's net namespace since the thread is switching around - return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) + return rErr } diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go index 7629f584..284751e5 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse.go +++ b/vendor/github.com/containers/common/pkg/parse/parse.go @@ -14,7 +14,7 @@ import ( // ValidateVolumeOpts validates a volume's options func ValidateVolumeOpts(options []string) ([]string, error) { - var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir, foundCopy int + var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir, foundCopy, foundCopySymlink int finalOpts := make([]string, 0, len(options)) for _, opt := range options { // support advanced options like upperdir=/path, workdir=/path @@ -93,6 +93,11 @@ func ValidateVolumeOpts(options []string) ([]string, error) { if foundCopy > 1 { return nil, fmt.Errorf("invalid options %q, can only specify 1 'copy' or 'nocopy' option", strings.Join(options, ", ")) } + case "no-dereference": + foundCopySymlink++ + if foundCopySymlink > 1 { + return nil, fmt.Errorf("invalid options %q, can only specify 1 'no-dereference' option", strings.Join(options, ", ")) + } default: return nil, fmt.Errorf("invalid option type %q", opt) } diff --git a/vendor/github.com/containers/common/pkg/parse/parse_unix.go b/vendor/github.com/containers/common/pkg/parse/parse_unix.go index 44fe33d9..86563b3e 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/common/pkg/parse/parse_unix.go @@ -1,5 +1,4 @@ //go:build linux || darwin || freebsd -// +build linux darwin freebsd package parse diff --git a/vendor/github.com/containers/common/pkg/password/password_supported.go b/vendor/github.com/containers/common/pkg/password/password_supported.go index 56e95b3d..4761b3ff 100644 --- a/vendor/github.com/containers/common/pkg/password/password_supported.go +++ b/vendor/github.com/containers/common/pkg/password/password_supported.go @@ -1,5 +1,4 @@ //go:build linux || darwin || freebsd -// +build linux darwin freebsd package password diff --git 
a/vendor/github.com/containers/common/pkg/password/password_windows.go b/vendor/github.com/containers/common/pkg/password/password_windows.go index 7a0822d0..0a1d9eeb 100644 --- a/vendor/github.com/containers/common/pkg/password/password_windows.go +++ b/vendor/github.com/containers/common/pkg/password/password_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package password diff --git a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go index 901e28a5..008fe6bc 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go +++ b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package retry diff --git a/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go b/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go index 7ea018a2..dc4c5bf1 100644 --- a/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go +++ b/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux // Rootlessport Config type for use in podman/cmd/rootlessport. package rootlessport diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go index 0db77879..2a59c149 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go @@ -142,6 +142,7 @@ func DefaultProfile() *Seccomp { "fchdir", "fchmod", "fchmodat", + "fchmodat2", "fchown", "fchown32", "fchownat", diff --git a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go index 87ac2ab7..616cae33 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go +++ b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go @@ -1,5 +1,4 @@ //go:build linux && seccomp -// +build linux,seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/seccomp/filter.go b/vendor/github.com/containers/common/pkg/seccomp/filter.go index 72c95734..1739dcb9 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/filter.go +++ b/vendor/github.com/containers/common/pkg/seccomp/filter.go @@ -1,5 +1,4 @@ //go:build seccomp -// +build seccomp // NOTE: this package has originally been copied from // github.com/opencontainers/runc and modified to work for other use cases diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json index 18674db4..306e9014 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json @@ -149,6 +149,7 @@ "fchdir", "fchmod", "fchmodat", + "fchmodat2", "fchown", "fchown32", "fchownat", diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go index f7adde8a..e399f6b2 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go @@ -1,5 +1,4 @@ //go:build seccomp -// +build seccomp // SPDX-License-Identifier: Apache-2.0 diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go 
b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go index da5230c5..1bf8155d 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux || !seccomp -// +build !linux !seccomp // SPDX-License-Identifier: Apache-2.0 @@ -15,12 +14,12 @@ import ( var errNotSupported = errors.New("seccomp not enabled in this build") -// LoadProfile returns an error on unsuppored systems +// LoadProfile returns an error on unsupported systems func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) { return nil, errNotSupported } -// GetDefaultProfile returns an error on unsuppored systems +// GetDefaultProfile returns an error on unsupported systems func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) { return nil, errNotSupported } diff --git a/vendor/github.com/containers/common/pkg/seccomp/supported.go b/vendor/github.com/containers/common/pkg/seccomp/supported.go index f8a20e53..5c39979d 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/supported.go +++ b/vendor/github.com/containers/common/pkg/seccomp/supported.go @@ -1,5 +1,4 @@ //go:build linux && seccomp -// +build linux,seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/seccomp/validate.go b/vendor/github.com/containers/common/pkg/seccomp/validate.go index 80558c1f..81effc76 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/validate.go +++ b/vendor/github.com/containers/common/pkg/seccomp/validate.go @@ -1,5 +1,4 @@ //go:build seccomp -// +build seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go index 3054a2bb..33bb62dc 100644 --- a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go @@ -10,6 +10,7 @@ import ( "sort" "github.com/containers/storage/pkg/lockfile" + "golang.org/x/exp/maps" ) // secretsDataFile is the file where secrets data/payload will be stored @@ -56,10 +57,7 @@ func (d *Driver) List() ([]string, error) { if err != nil { return nil, err } - allID := make([]string, 0, len(secretData)) - for k := range secretData { - allID = append(allID, k) - } + allID := maps.Keys(secretData) sort.Strings(allID) return allID, err } @@ -79,7 +77,7 @@ func (d *Driver) Lookup(id string) ([]byte, error) { return nil, fmt.Errorf("%s: %w", id, errNoSecretData) } -// Store stores the bytes associated with an ID. An error is returned if the ID arleady exists +// Store stores the bytes associated with an ID. 
An error is returned if the ID already exists func (d *Driver) Store(id string, data []byte) error { d.lockfile.Lock() defer d.lockfile.Unlock() diff --git a/vendor/github.com/containers/common/pkg/secrets/secrets.go b/vendor/github.com/containers/common/pkg/secrets/secrets.go index 47e68840..7092c8f2 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secrets.go +++ b/vendor/github.com/containers/common/pkg/secrets/secrets.go @@ -13,6 +13,7 @@ import ( "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/regexp" "github.com/containers/storage/pkg/stringid" + "golang.org/x/exp/maps" ) // maxSecretSize is the max size for secret data - 512kB @@ -289,11 +290,7 @@ func (s *SecretsManager) List() ([]Secret, error) { if err != nil { return nil, err } - ls := make([]Secret, 0, len(secrets)) - for _, v := range secrets { - ls = append(ls, v) - } - return ls, nil + return maps.Values(secrets), nil } // LookupSecretData returns secret metadata as well as secret data in bytes. diff --git a/vendor/github.com/containers/common/pkg/servicereaper/service.go b/vendor/github.com/containers/common/pkg/servicereaper/service.go index 11482c59..12a29669 100644 --- a/vendor/github.com/containers/common/pkg/servicereaper/service.go +++ b/vendor/github.com/containers/common/pkg/servicereaper/service.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package servicereaper diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux.go b/vendor/github.com/containers/common/pkg/signal/signal_linux.go index 21e09c9f..6800b7d3 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_linux.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux.go @@ -1,5 +1,4 @@ //go:build linux && !mips && !mipsle && !mips64 && !mips64le -// +build linux,!mips,!mipsle,!mips64,!mips64le // Signal handling for Linux only. package signal diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go index 52b07aaf..58b35395 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go @@ -1,6 +1,4 @@ //go:build linux && (mips || mipsle || mips64 || mips64le) -// +build linux -// +build mips mipsle mips64 mips64le // Special signal handling for mips architecture package signal diff --git a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go index 0e8685a7..36893d05 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux // Signal handling for Linux only. 
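
Most of the one-line hunks in this part of the diff delete "// +build" comments. Go 1.17 made the "//go:build" form canonical, and gofmt has since maintained only that line and removed the legacy mirror, which is all these hunks do. Before and after express the identical constraint:

// Before: one constraint written twice, kept in sync by hand.
//go:build linux && (mips || mipsle || mips64 || mips64le)
// +build linux
// +build mips mipsle mips64 mips64le

// After: gofmt on Go 1.17+ keeps only the canonical line.
//go:build linux && (mips || mipsle || mips64 || mips64le)
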
package signal diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go index 1abb5802..ad47f63c 100644 --- a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go +++ b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go @@ -23,6 +23,7 @@ import ( "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "golang.org/x/crypto/ssh/knownhosts" + "golang.org/x/exp/maps" ) func golangConnectionCreate(options ConnectionCreateOptions) error { @@ -52,32 +53,32 @@ func golangConnectionCreate(options ConnectionCreateOptions) error { dst.URI += uri.Path } - cfg, err := config.ReadCustomConfig() - if err != nil { - return err - } - if cfg.Engine.ServiceDestinations == nil { - cfg.Engine.ServiceDestinations = map[string]config.Destination{ - options.Name: *dst, - } - cfg.Engine.ActiveService = options.Name - } else { - cfg.Engine.ServiceDestinations[options.Name] = *dst - } - - // Create or update an existing farm with the connection being added - if options.Farm != "" { - if len(cfg.Farms.List) == 0 { - cfg.Farms.Default = options.Farm - } - if val, ok := cfg.Farms.List[options.Farm]; ok { - cfg.Farms.List[options.Farm] = append(val, options.Name) + // TODO this really should not live here, it must be in podman where we write the other connections as well. + // This duplicates the code for no reason and I have a really hard time to make any sense of why this code + // was added in the first place. + return config.EditConnectionConfig(func(cfg *config.ConnectionsFile) error { + if cfg.Connection.Connections == nil { + cfg.Connection.Connections = map[string]config.Destination{ + options.Name: *dst, + } + cfg.Connection.Default = options.Name } else { - cfg.Farms.List[options.Farm] = []string{options.Name} + cfg.Connection.Connections[options.Name] = *dst } - } - return cfg.Write() + // Create or update an existing farm with the connection being added + if options.Farm != "" { + if len(cfg.Farm.List) == 0 { + cfg.Farm.Default = options.Farm + } + if val, ok := cfg.Farm.List[options.Farm]; ok { + cfg.Farm.List[options.Farm] = append(val, options.Name) + } else { + cfg.Farm.List[options.Farm] = []string{options.Name} + } + } + return nil + }) } func golangConnectionDial(options ConnectionDialOptions) (*ConnectionDialReport, error) { @@ -98,7 +99,7 @@ func golangConnectionDial(options ConnectionDialOptions) (*ConnectionDialReport, return &ConnectionDialReport{dial}, nil } -func golangConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, error) { +func golangConnectionExec(options ConnectionExecOptions, input io.Reader) (*ConnectionExecReport, error) { if !strings.HasPrefix(options.Host, "ssh://") { options.Host = "ssh://" + options.Host } @@ -116,7 +117,7 @@ func golangConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, return nil, fmt.Errorf("failed to connect: %w", err) } - out, err := ExecRemoteCommand(dialAdd, strings.Join(options.Args, " ")) + out, err := ExecRemoteCommandWithInput(dialAdd, strings.Join(options.Args, " "), input) if err != nil { return nil, err } @@ -188,6 +189,10 @@ func golangConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, er // ExecRemoteCommand takes a ssh client connection and a command to run and executes the // command on the specified client. 
The function returns the Stdout from the client or the Stderr func ExecRemoteCommand(dial *ssh.Client, run string) ([]byte, error) { + return ExecRemoteCommandWithInput(dial, run, nil) +} + +func ExecRemoteCommandWithInput(dial *ssh.Client, run string, input io.Reader) ([]byte, error) { sess, err := dial.NewSession() // new ssh client session if err != nil { return nil, err @@ -196,8 +201,11 @@ func ExecRemoteCommand(dial *ssh.Client, run string) ([]byte, error) { var buffer bytes.Buffer var bufferErr bytes.Buffer - sess.Stdout = &buffer // output from client funneled into buffer - sess.Stderr = &bufferErr // err form client funneled into buffer + sess.Stdout = &buffer // output from client funneled into buffer + sess.Stderr = &bufferErr // err from client funneled into buffer + if input != nil { + sess.Stdin = input + } if err := sess.Run(run); err != nil { // run the command on the ssh client return nil, fmt.Errorf("%v: %w", bufferErr.String(), err) } @@ -262,7 +270,7 @@ func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection } } } - var authMethods []ssh.AuthMethod // now we validate and check for the authorization methods, most notaibly public key authorization + var authMethods []ssh.AuthMethod // now we validate and check for the authorization methods, most notably public key authorization if len(signers) > 0 { dedup := make(map[string]ssh.Signer) for _, s := range signers { @@ -273,10 +281,7 @@ func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection dedup[fp] = s } - var uniq []ssh.Signer - for _, s := range dedup { - uniq = append(uniq, s) - } + uniq := maps.Values(dedup) authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { return uniq, nil })) @@ -286,7 +291,7 @@ func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection } if len(authMethods) == 0 { authMethods = append(authMethods, ssh.PasswordCallback(func() (string, error) { - pass, err := ReadPassword(fmt.Sprintf("%s's login password:", uri.User.Username())) + pass, err := ReadPassword(uri.User.Username() + "'s login password:") return string(pass), err })) } diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_native.go b/vendor/github.com/containers/common/pkg/ssh/connection_native.go index c725cb27..a92693ff 100644 --- a/vendor/github.com/containers/common/pkg/ssh/connection_native.go +++ b/vendor/github.com/containers/common/pkg/ssh/connection_native.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io" "os/exec" "regexp" "strings" @@ -34,7 +35,7 @@ func nativeConnectionCreate(options ConnectionCreateOptions) error { // test connection ssh, err := exec.LookPath("ssh") if err != nil { - return fmt.Errorf("no ssh binary found") + return err } if strings.Contains(uri.Host, "/run") { @@ -72,27 +73,35 @@ func nativeConnectionCreate(options ConnectionCreateOptions) error { return fmt.Errorf("remote podman %q failed to report its UDS socket", uri.Host) } - cfg, err := config.ReadCustomConfig() - if err != nil { - return err - } - if options.Default { - cfg.Engine.ActiveService = options.Name - } - - if cfg.Engine.ServiceDestinations == nil { - cfg.Engine.ServiceDestinations = map[string]config.Destination{ - options.Name: *dst, + // TODO this really should not live here, it must be in podman where we write the other connections as well. + // This duplicates the code for no reason and I have a really hard time to make any sense of why this code + // was added in the first place. 
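
Both connection-create paths now funnel their edits through config.EditConnectionConfig, which hands the callback a parsed ConnectionsFile and persists the result when the callback succeeds, so the read-modify-write cycle lives in one place. A usage sketch with placeholder connection name and URI, following the shape the hunks above establish:

import "github.com/containers/common/pkg/config"

func addConnection() error {
	return config.EditConnectionConfig(func(cfg *config.ConnectionsFile) error {
		if cfg.Connection.Connections == nil {
			cfg.Connection.Connections = make(map[string]config.Destination)
			cfg.Connection.Default = "prod" // first connection becomes the default
		}
		cfg.Connection.Connections["prod"] = config.Destination{
			URI: "ssh://core@host.example:22/run/user/1000/podman/podman.sock",
		}
		return nil // returning an error should abort without writing the file
	})
}
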
+ return config.EditConnectionConfig(func(cfg *config.ConnectionsFile) error { + if cfg.Connection.Connections == nil { + cfg.Connection.Connections = map[string]config.Destination{ + options.Name: *dst, + } + cfg.Connection.Default = options.Name + } else { + cfg.Connection.Connections[options.Name] = *dst } - cfg.Engine.ActiveService = options.Name - } else { - cfg.Engine.ServiceDestinations[options.Name] = *dst - } - return cfg.Write() + // Create or update an existing farm with the connection being added + if options.Farm != "" { + if len(cfg.Farm.List) == 0 { + cfg.Farm.Default = options.Farm + } + if val, ok := cfg.Farm.List[options.Farm]; ok { + cfg.Farm.List[options.Farm] = append(val, options.Name) + } else { + cfg.Farm.List[options.Farm] = []string{options.Name} + } + } + return nil + }) } -func nativeConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, error) { +func nativeConnectionExec(options ConnectionExecOptions, input io.Reader) (*ConnectionExecReport, error) { dst, uri, err := Validate(options.User, options.Host, options.Port, options.Identity) if err != nil { return nil, err @@ -100,7 +109,7 @@ func nativeConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, ssh, err := exec.LookPath("ssh") if err != nil { - return nil, fmt.Errorf("no ssh binary found") + return nil, err } output := &bytes.Buffer{} @@ -126,6 +135,9 @@ func nativeConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, info := exec.Command(ssh, args...) info.Stdout = output info.Stderr = errors + if input != nil { + info.Stdin = input + } err = info.Run() if err != nil { return nil, err @@ -145,7 +157,7 @@ func nativeConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, er scp, err := exec.LookPath("scp") if err != nil { - return nil, fmt.Errorf("no scp binary found") + return nil, err } conf, err := config.Default() diff --git a/vendor/github.com/containers/common/pkg/ssh/ssh.go b/vendor/github.com/containers/common/pkg/ssh/ssh.go index d638d69a..6e8a923e 100644 --- a/vendor/github.com/containers/common/pkg/ssh/ssh.go +++ b/vendor/github.com/containers/common/pkg/ssh/ssh.go @@ -1,7 +1,8 @@ package ssh import ( - "fmt" + "errors" + "io" "golang.org/x/crypto/ssh" ) @@ -17,7 +18,7 @@ func Dial(options *ConnectionDialOptions, kind EngineMode) (*ssh.Client, error) var rep *ConnectionDialReport var err error if kind == NativeMode { - return nil, fmt.Errorf("ssh dial failed: you cannot create a dial-able client with native ssh") + return nil, errors.New("ssh dial failed: you cannot create a dial-able client with native ssh") } rep, err = golangConnectionDial(*options) if err != nil { @@ -27,15 +28,19 @@ func Dial(options *ConnectionDialOptions, kind EngineMode) (*ssh.Client, error) } func Exec(options *ConnectionExecOptions, kind EngineMode) (string, error) { + return ExecWithInput(options, kind, nil) +} + +func ExecWithInput(options *ConnectionExecOptions, kind EngineMode, input io.Reader) (string, error) { var rep *ConnectionExecReport var err error if kind == NativeMode { - rep, err = nativeConnectionExec(*options) + rep, err = nativeConnectionExec(*options, input) if err != nil { return "", err } } else { - rep, err = golangConnectionExec(*options) + rep, err = golangConnectionExec(*options, input) if err != nil { return "", err } diff --git a/vendor/github.com/containers/common/pkg/ssh/types.go b/vendor/github.com/containers/common/pkg/ssh/types.go index 60065593..bc41d78b 100644 --- a/vendor/github.com/containers/common/pkg/ssh/types.go +++ 
b/vendor/github.com/containers/common/pkg/ssh/types.go @@ -70,11 +70,11 @@ type ConnectionScpReport struct { // Info is the overall struct that describes the host system // running libpod/podman type Info struct { - Host *HostInfo `json:"host"` - Store *StoreInfo `json:"store"` - Registries map[string]interface{} `json:"registries"` - Plugins Plugins `json:"plugins"` - Version Version `json:"version"` + Host *HostInfo `json:"host"` + Store *StoreInfo `json:"store"` + Registries map[string]any `json:"registries"` + Plugins Plugins `json:"plugins"` + Version Version `json:"version"` } // Version is an output struct for API @@ -121,8 +121,8 @@ type HostInfo struct { OCIRuntime *OCIRuntimeInfo `json:"ociRuntime"` OS string `json:"os"` // RemoteSocket returns the UNIX domain socket the Podman service is listening on - RemoteSocket *RemoteSocket `json:"remoteSocket,omitempty"` - RuntimeInfo map[string]interface{} `json:"runtimeInfo,omitempty"` + RemoteSocket *RemoteSocket `json:"remoteSocket,omitempty"` + RuntimeInfo map[string]any `json:"runtimeInfo,omitempty"` // ServiceIsRemote is true when the podman/libpod service is remote to the client ServiceIsRemote bool `json:"serviceIsRemote"` Security SecurityInfo `json:"security"` @@ -179,11 +179,11 @@ type OCIRuntimeInfo struct { // StoreInfo describes the container storage and its // attributes type StoreInfo struct { - ConfigFile string `json:"configFile"` - ContainerStore ContainerStore `json:"containerStore"` - GraphDriverName string `json:"graphDriverName"` - GraphOptions map[string]interface{} `json:"graphOptions"` - GraphRoot string `json:"graphRoot"` + ConfigFile string `json:"configFile"` + ContainerStore ContainerStore `json:"containerStore"` + GraphDriverName string `json:"graphDriverName"` + GraphOptions map[string]any `json:"graphOptions"` + GraphRoot string `json:"graphRoot"` // GraphRootAllocated is how much space the graphroot has in bytes GraphRootAllocated uint64 `json:"graphRootAllocated"` // GraphRootUsed is how much of graphroot is used in bytes diff --git a/vendor/github.com/containers/common/pkg/ssh/utils.go b/vendor/github.com/containers/common/pkg/ssh/utils.go index d47a9859..0aa76d65 100644 --- a/vendor/github.com/containers/common/pkg/ssh/utils.go +++ b/vendor/github.com/containers/common/pkg/ssh/utils.go @@ -1,6 +1,7 @@ package ssh import ( + "errors" "fmt" "io" "net" @@ -80,7 +81,7 @@ func ReadPassword(prompt string) (pw []byte, err error) { pw = append(pw, b[0]) // limit size, so that a wrong input won't fill up the memory if len(pw) > 1024 { - err = fmt.Errorf("password too long, 1024 byte limit") + err = errors.New("password too long, 1024 byte limit") } } if err != nil { @@ -156,7 +157,7 @@ func ParseScpArgs(options ConnectionScpOptions) (string, string, string, bool, e } else { split = strings.Split(host, ":") if len(split) != 2 { - return "", "", "", false, fmt.Errorf("no remote destination provided") + return "", "", "", false, errors.New("no remote destination provided") } host = split[0] remotePath = split[1] diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go index 6ba2154a..6845914a 100644 --- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -212,7 +212,7 @@ func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, } func rchown(chowndir string, uid, gid int) error { - return 
filepath.Walk(chowndir, func(filePath string, f os.FileInfo, err error) error { + return filepath.Walk(chowndir, func(filePath string, _ os.FileInfo, err error) error { return os.Lchown(filePath, uid, gid) }) } @@ -231,7 +231,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string fileInfo, err := os.Stat(hostDirOrFile) if err != nil { if errors.Is(err, os.ErrNotExist) { - logrus.Warnf("Path %q from %q doesn't exist, skipping", hostDirOrFile, filePath) + logrus.Infof("Path %q from %q doesn't exist, skipping", hostDirOrFile, filePath) continue } return nil, err diff --git a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go index 6ae9a416..c21a6aa5 100644 --- a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go +++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go @@ -3,6 +3,7 @@ package supplemented import ( "container/list" "context" + "errors" "fmt" "io" @@ -14,6 +15,7 @@ import ( multierror "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // supplementedImageReference groups multiple references together. @@ -139,7 +141,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty } sources[manifestDigest] = src - // Parse the manifest as a list of images. + // Parse the manifest as a list of images and artifacts. list, err := manifest.ListFromBlob(manifestBytes, manifestType) if err != nil { return fmt.Errorf("parsing manifest blob %q as a %q: %w", string(manifestBytes), manifestType, err) @@ -155,7 +157,11 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty } chaseInstances = []digest.Digest{instance} case cp.CopySpecificImages: - chaseInstances = s.instances + for _, instance := range list.Instances() { + if slices.Contains(s.instances, instance) { + chaseInstances = append(chaseInstances, instance) + } + } case cp.CopyAllImages: chaseInstances = list.Instances() } @@ -281,7 +287,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty } func (s *supplementedImageReference) DeleteImage(_ context.Context, _ *types.SystemContext) error { - return fmt.Errorf("deletion of images not implemented") + return errors.New("deletion of images not implemented") } func (s *supplementedImageSource) Close() error { diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go index d77e52f6..5e680957 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_other.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_other.go index 26c543c4..fdb0586e 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_other.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_other.go @@ -1,5 +1,4 @@ //go:build !linux && !windows -// +build !linux,!windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go index 9f354eb1..13c42d28 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go +++ 
b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go index 018c488b..739a7ffb 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go b/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go index c9e4184a..2b6bbaf2 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go @@ -1,5 +1,4 @@ //go:build (windows && ignore) || osx -// +build windows,ignore osx package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go index af1c77d6..bf4b3104 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go @@ -1,5 +1,4 @@ //go:build solaris && cgo -// +build solaris,cgo package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go index 4aa9401f..ce6ac023 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go @@ -1,5 +1,4 @@ //go:build !linux && !solaris && !windows -// +build !linux,!solaris,!windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go index 455a8892..e89d18a0 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/systemd/systemd_linux.go b/vendor/github.com/containers/common/pkg/systemd/systemd_linux.go new file mode 100644 index 00000000..c1d8ed72 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/systemd/systemd_linux.go @@ -0,0 +1,151 @@ +package systemd + +import ( + "context" + "crypto/rand" + "fmt" + "os" + "strconv" + "sync" + + "github.com/containers/common/pkg/cgroups" + "github.com/containers/storage/pkg/unshare" + systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" + "github.com/sirupsen/logrus" +) + +var ( + runsOnSystemdOnce sync.Once + runsOnSystemd bool +) + +// RunsOnSystemd returns whether the system is using systemd +func RunsOnSystemd() bool { + runsOnSystemdOnce.Do(func() { + // per sd_booted(3), check for this dir + fd, err := os.Stat("/run/systemd/system") + runsOnSystemd = err == nil && fd.IsDir() + }) + return runsOnSystemd +} + +func moveProcessPIDFileToScope(pidPath, slice, scope string) error { + data, err := os.ReadFile(pidPath) + if err != nil { + // do not raise an error if the file doesn't exist + if os.IsNotExist(err) { + return nil + } + return fmt.Errorf("cannot read pid file: %w", err) + } + pid, err := strconv.ParseUint(string(data), 10, 0) + if err != nil { + return fmt.Errorf("cannot parse pid file %s: %w", pidPath, err) + } 
+ + return moveProcessToScope(int(pid), slice, scope) +} + +func moveProcessToScope(pid int, slice, scope string) error { + err := RunUnderSystemdScope(pid, slice, scope) + // If the PID is not valid anymore, do not return an error. + if dbusErr, ok := err.(dbus.Error); ok { + if dbusErr.Name == "org.freedesktop.DBus.Error.UnixProcessIdUnknown" { + return nil + } + } + return err +} + +// MoveRootlessNetnsSlirpProcessToUserSlice moves the slirp4netns process for the rootless netns +// into a different scope so that systemd does not kill it with a container. +func MoveRootlessNetnsSlirpProcessToUserSlice(pid int) error { + randBytes := make([]byte, 4) + _, err := rand.Read(randBytes) + if err != nil { + return err + } + return moveProcessToScope(pid, "user.slice", fmt.Sprintf("rootless-netns-%x.scope", randBytes)) +} + +// MovePauseProcessToScope moves the pause process used for rootless mode to keep the namespaces alive to +// a separate scope. +func MovePauseProcessToScope(pausePidPath string) { + var err error + + for i := 0; i < 10; i++ { + randBytes := make([]byte, 4) + _, err = rand.Read(randBytes) + if err != nil { + logrus.Errorf("failed to read random bytes: %v", err) + continue + } + err = moveProcessPIDFileToScope(pausePidPath, "user.slice", fmt.Sprintf("podman-pause-%x.scope", randBytes)) + if err == nil { + return + } + } + + if err != nil { + unified, err2 := cgroups.IsCgroup2UnifiedMode() + if err2 != nil { + logrus.Warnf("Failed to detect if running with cgroup unified: %v", err) + } + if RunsOnSystemd() && unified { + logrus.Warnf("Failed to add pause process to systemd sandbox cgroup: %v", err) + } else { + logrus.Debugf("Failed to add pause process to systemd sandbox cgroup: %v", err) + } + } +} + +// RunUnderSystemdScope adds the specified pid to a systemd scope +func RunUnderSystemdScope(pid int, slice string, unitName string) error { + var properties []systemdDbus.Property + var conn *systemdDbus.Conn + var err error + + if unshare.GetRootlessUID() != 0 { + conn, err = cgroups.UserConnection(unshare.GetRootlessUID()) + if err != nil { + return err + } + } else { + conn, err = systemdDbus.NewWithContext(context.Background()) + if err != nil { + return err + } + } + defer conn.Close() + properties = append(properties, systemdDbus.PropSlice(slice)) + properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) + properties = append(properties, newProp("Delegate", true)) + properties = append(properties, newProp("DefaultDependencies", false)) + ch := make(chan string) + _, err = conn.StartTransientUnitContext(context.Background(), unitName, "replace", properties, ch) + if err != nil { + // On errors check if the cgroup already exists, if it does move the process there + if props, err := conn.GetUnitTypePropertiesContext(context.Background(), unitName, "Scope"); err == nil { + if cgroup, ok := props["ControlGroup"].(string); ok && cgroup != "" { + if err := cgroups.MoveUnderCgroup(cgroup, "", []uint32{uint32(pid)}); err == nil { + return nil + } + // On errors return the original error message we got from StartTransientUnit. 
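
moveProcessToScope above swallows exactly one failure mode: the PID exiting before systemd could see it, identified by the org.freedesktop.DBus.Error.UnixProcessIdUnknown name. The type assertion works because the godbus error is a plain value; a sketch of the same tolerance written with errors.As, which also matches when the error arrives wrapped (imports: errors, github.com/godbus/dbus/v5):

func ignoreVanishedPID(err error) error {
	var dbusErr dbus.Error
	if errors.As(err, &dbusErr) &&
		dbusErr.Name == "org.freedesktop.DBus.Error.UnixProcessIdUnknown" {
		return nil // the process exited first; nothing left to move
	}
	return err
}
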
+ } + } + return err + } + + // Block until job is started + <-ch + + return nil +} + +func newProp(name string, units any) systemdDbus.Property { + return systemdDbus.Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} diff --git a/vendor/github.com/containers/common/pkg/systemd/systemd_unsupported.go b/vendor/github.com/containers/common/pkg/systemd/systemd_unsupported.go new file mode 100644 index 00000000..e4a62852 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/systemd/systemd_unsupported.go @@ -0,0 +1,15 @@ +//go:build !linux + +package systemd + +import "errors" + +func RunsOnSystemd() bool { + return false +} + +func MovePauseProcessToScope(pausePidPath string) {} + +func RunUnderSystemdScope(pid int, slice string, unitName string) error { + return errors.New("RunUnderSystemdScope not supported on this OS") +} diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unix.go b/vendor/github.com/containers/common/pkg/umask/umask_unix.go index 4f5527cb..caa60c8e 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask_unix.go +++ b/vendor/github.com/containers/common/pkg/umask/umask_unix.go @@ -1,5 +1,4 @@ //go:build linux || darwin -// +build linux darwin package umask diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go index cf76ea1d..547dea86 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go +++ b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !darwin -// +build !linux,!darwin package umask diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index 708472ba..8229296d 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -9,16 +9,14 @@ import ( "github.com/fsnotify/fsnotify" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) -// StringInSlice determines if a string is in a string slice, returns bool +// StringInSlice determines if a string is in a string slice, returns bool. +// +// Deprecated: Use [golang.org/x/exp/slices.Contains] instead. func StringInSlice(s string, sl []string) bool { - for _, i := range sl { - if i == s { - return true - } - } - return false + return slices.Contains(sl, s) } // StringMatchRegexSlice determines if a given string matches one of the given regexes, returns bool diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go deleted file mode 100644 index 0cd53af5..00000000 --- a/vendor/github.com/containers/common/pkg/util/util_supported.go +++ /dev/null @@ -1,91 +0,0 @@ -//go:build linux || darwin || freebsd -// +build linux darwin freebsd - -package util - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sync" - "syscall" - - "github.com/containers/storage/pkg/homedir" - "github.com/containers/storage/pkg/unshare" - "github.com/sirupsen/logrus" -) - -var ( - rootlessRuntimeDirOnce sync.Once - rootlessRuntimeDir string -) - -// isWriteableOnlyByOwner checks that the specified permission mask allows write -// access only to the owner. 
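The new systemd helper above is self-contained: RunsOnSystemd performs the sd_booted(3) directory check once, and RunUnderSystemdScope asks systemd over D-Bus to create a transient scope containing an existing PID. A minimal sketch of a caller, assuming the import path of the vendored package; the slice and scope names are illustrative only:

package main

import (
	"fmt"
	"os"

	"github.com/containers/common/pkg/systemd"
)

func main() {
	if !systemd.RunsOnSystemd() {
		fmt.Fprintln(os.Stderr, "not booted with systemd; nothing to do")
		return
	}
	// Place the current process in its own transient scope under user.slice,
	// so that stopping an unrelated unit does not take it down as well.
	if err := systemd.RunUnderSystemdScope(os.Getpid(), "user.slice", "example-1234.scope"); err != nil {
		fmt.Fprintf(os.Stderr, "failed to enter scope: %v\n", err)
	}
}

This mirrors how MovePauseProcessToScope and MoveRootlessNetnsSlirpProcessToUserSlice use the same primitive with randomized scope names.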
-func isWriteableOnlyByOwner(perm os.FileMode) bool { - return (perm & 0o722) == 0o700 -} - -// GetRuntimeDir returns the runtime directory -func GetRuntimeDir() (string, error) { - var rootlessRuntimeDirError error - - rootlessRuntimeDirOnce.Do(func() { - runtimeDir, err := homedir.GetRuntimeDir() - if err != nil { - logrus.Debug(err) - } - if runtimeDir != "" { - st, err := os.Stat(runtimeDir) - if err != nil { - rootlessRuntimeDirError = err - return - } - if int(st.Sys().(*syscall.Stat_t).Uid) != os.Geteuid() { - rootlessRuntimeDirError = fmt.Errorf("XDG_RUNTIME_DIR directory %q is not owned by the current user", runtimeDir) - return - } - } - uid := fmt.Sprintf("%d", unshare.GetRootlessUID()) - if runtimeDir == "" { - tmpDir := filepath.Join("/run", "user", uid) - if err := os.MkdirAll(tmpDir, 0o700); err != nil { - logrus.Debugf("unable to make temp dir: %v", err) - } - st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { - runtimeDir = tmpDir - } - } - if runtimeDir == "" { - tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid)) - if err := os.MkdirAll(tmpDir, 0o700); err != nil { - logrus.Debugf("unable to make temp dir %v", err) - } - st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { - runtimeDir = tmpDir - } - } - if runtimeDir == "" { - home := os.Getenv("HOME") - if home == "" { - rootlessRuntimeDirError = errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty") - return - } - resolvedHome, err := filepath.EvalSymlinks(home) - if err != nil { - rootlessRuntimeDirError = fmt.Errorf("cannot resolve home: %w", err) - return - } - runtimeDir = filepath.Join(resolvedHome, "rundir") - } - rootlessRuntimeDir = runtimeDir - }) - - if rootlessRuntimeDirError != nil { - return "", rootlessRuntimeDirError - } - return rootlessRuntimeDir, nil -} diff --git a/vendor/github.com/containers/common/pkg/util/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go deleted file mode 100644 index 1525bdc3..00000000 --- a/vendor/github.com/containers/common/pkg/util/util_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build windows -// +build windows - -package util - -import ( - "errors" -) - -// getRuntimeDir returns the runtime directory -func GetRuntimeDir() (string, error) { - return "", errors.New("this function is not implemented for windows") -} diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index 19ba92c0..cd77bf89 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. 
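With util.StringInSlice now deprecated in favor of golang.org/x/exp/slices, call sites can drop the wrapper entirely; note the flipped argument order (slice first, element second). A minimal sketch:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	names := []string{"alpine", "fedora", "debian"}
	// Before: util.StringInSlice("fedora", names)
	// After:
	fmt.Println(slices.Contains(names, "fedora")) // true
}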
-const Version = "0.57.4" +const Version = "0.58.0" diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index a42e3b67..1706f711 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -23,9 +23,9 @@ var ( // compressionBufferSize is the buffer size used to compress a blob compressionBufferSize = 1048576 - // expectedCompressionFormats is used to check if a blob with a specified media type is compressed + // expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed // using the algorithm that the media type says it should be compressed with - expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ + expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{ imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, @@ -62,15 +62,16 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI res.srcCompressorName = internalblobinfocache.Uncompressed } - if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() { - logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name()) + if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() { + logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedBaseFormat.Name(), format.Name()) } return res, nil } // bpCompressionStepData contains data that the copy pipeline needs about the compression step. type bpCompressionStepData struct { - operation types.LayerCompression // Operation to use for updating the blob metadata. + operation bpcOperation // What we are actually doing + uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. srcCompressorName string // Compressor name to record in the blob info cache for the source blob. @@ -78,6 +79,18 @@ type bpCompressionStepData struct { closers []io.Closer // Objects to close after the upload is done, if any. } +type bpcOperation int + +const ( + bpcOpInvalid bpcOperation = iota + bpcOpPreserveOpaque // We are preserving something where compression is not applicable + bpcOpPreserveCompressed // We are preserving a compressed, and decompressible, layer + bpcOpPreserveUncompressed // We are preserving an uncompressed, and compressible, layer + bpcOpCompressUncompressed // We are compressing uncompressed data + bpcOpRecompressCompressed // We are recompressing compressed data + bpcOpDecompressCompressed // We are decompressing compressed data +) + // blobPipelineCompressionStep updates *stream to compress and/or decompress it. // srcInfo is primarily used for error messages. 
// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedBlobData, @@ -112,10 +125,11 @@ func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModi // bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so. func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) { if isOciEncrypted(stream.info.MediaType) { + // We can’t do anything with an encrypted blob unless decrypted. logrus.Debugf("Using original blob without modification for encrypted blob") - // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted return &bpCompressionStepData{ - operation: types.PreserveOriginal, + operation: bpcOpPreserveOpaque, + uploadedOperation: types.PreserveOriginal, uploadedAlgorithm: nil, srcCompressorName: internalblobinfocache.UnknownCompression, uploadedCompressorName: internalblobinfocache.UnknownCompression, @@ -143,7 +157,8 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp Size: -1, } return &bpCompressionStepData{ - operation: types.Compress, + operation: bpcOpCompressUncompressed, + uploadedOperation: types.Compress, uploadedAlgorithm: uploadedAlgorithm, uploadedAnnotations: annotations, srcCompressorName: detected.srcCompressorName, @@ -157,7 +172,8 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp // bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so. func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed && - ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() { + ic.compressionFormat != nil && + (ic.compressionFormat.Name() != detected.format.Name() && ic.compressionFormat.Name() != detected.format.BaseVariantName()) { // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally // re-compressed using the desired format. logrus.Debugf("Blob will be converted") @@ -182,7 +198,8 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp } succeeded = true return &bpCompressionStepData{ - operation: types.PreserveOriginal, + operation: bpcOpRecompressCompressed, + uploadedOperation: types.PreserveOriginal, uploadedAlgorithm: ic.compressionFormat, uploadedAnnotations: annotations, srcCompressorName: detected.srcCompressorName, @@ -208,7 +225,8 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp Size: -1, } return &bpCompressionStepData{ - operation: types.Decompress, + operation: bpcOpDecompressCompressed, + uploadedOperation: types.Decompress, uploadedAlgorithm: nil, srcCompressorName: detected.srcCompressorName, uploadedCompressorName: internalblobinfocache.Uncompressed, @@ -232,14 +250,26 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom // But don’t touch blobs in objects where we can’t change compression, // so that src.UpdatedImage() doesn’t fail; assume that for such blobs // LayerInfosForCopy() should not be making any changes in the first place. 
+	var bpcOp bpcOperation
+	var uploadedOp types.LayerCompression
 	var algorithm *compressiontypes.Algorithm
-	if layerCompressionChangeSupported && detected.isCompressed {
+	switch {
+	case !layerCompressionChangeSupported:
+		bpcOp = bpcOpPreserveOpaque
+		uploadedOp = types.PreserveOriginal
+		algorithm = nil
+	case detected.isCompressed:
+		bpcOp = bpcOpPreserveCompressed
+		uploadedOp = types.PreserveOriginal
 		algorithm = &detected.format
-	} else {
+	default:
+		bpcOp = bpcOpPreserveUncompressed
+		uploadedOp = types.Decompress
 		algorithm = nil
 	}
 	return &bpCompressionStepData{
-		operation:              types.PreserveOriginal,
+		operation:              bpcOp,
+		uploadedOperation:      uploadedOp,
 		uploadedAlgorithm:      algorithm,
 		srcCompressorName:      detected.srcCompressorName,
 		uploadedCompressorName: detected.srcCompressorName,
@@ -248,7 +278,7 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom

 // updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
 func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
-	*operation = d.operation
+	*operation = d.uploadedOperation
 	// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
 	*algorithm = d.uploadedAlgorithm
 	if *annotations == nil {
@@ -257,7 +287,8 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom
 	maps.Copy(*annotations, d.uploadedAnnotations)
 }

-// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo adnd the original srcInfo.
+// recordValidatedDigestData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob)
+// and the original srcInfo (which the caller guarantees has been validated).
 // This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
 func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
 	encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
@@ -268,17 +299,26 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
 	// in the blob info cache (which would probably be necessary for any more complex logic),
 	// and the simplicity is attractive.
 	if !encryptionStep.encrypting && !decryptionStep.decrypting {
-		// If d.operation != types.PreserveOriginal, we now have two reliable digest values:
+		// If d.operation != bpcOpPreserve*, we now have two reliable digest values:
 		// srcinfo.Digest describes the pre-d.operation input, verified by digestingReader
 		// uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
-		// (because stream.info.Digest == "", this must have been computed afresh).
+		// (because we set stream.info.Digest == "", this must have been computed afresh).
 		switch d.operation {
-		case types.PreserveOriginal:
-			break // Do nothing, we have only one digest and we might not have even verified it.
-		case types.Compress:
+		case bpcOpPreserveOpaque:
+			// No useful information
+		case bpcOpCompressUncompressed:
 			c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
-		case types.Decompress:
+		case bpcOpDecompressCompressed:
 			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+		case bpcOpRecompressCompressed, bpcOpPreserveCompressed:
+			// We know one or two compressed digests. BlobInfoCache associates compression variants via the uncompressed digest,
+			// and we don’t know that one.
+			// That also means that repeated copies with the same recompression don’t identify reuse opportunities (unless
+			// RecordDigestUncompressedPair was called for both compressed variants for some other reason).
+		case bpcOpPreserveUncompressed:
+			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, srcInfo.Digest)
+		case bpcOpInvalid:
+			fallthrough
 		default:
 			return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
 		}
@@ -286,7 +326,7 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
 	if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
 		if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
 			// HACK: Don’t record zstd:chunked algorithms.
-			// There is already a similar hack in internal/imagedestination/impl/helpers.BlobMatchesRequiredCompression,
+			// There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
 			// and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
 			//
 			// We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
index 901d1082..4c6ba82e 100644
--- a/vendor/github.com/containers/image/v5/copy/digesting_reader.go
+++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go
@@ -23,11 +23,11 @@ type digestingReader struct {
 func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
 	var digester digest.Digester
 	if err := expectedDigest.Validate(); err != nil {
-		return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
+		return nil, fmt.Errorf("invalid digest specification %q: %w", expectedDigest, err)
 	}
 	digestAlgorithm := expectedDigest.Algorithm()
 	if !digestAlgorithm.Available() {
-		return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+		return nil, fmt.Errorf("invalid digest specification %q: unsupported digest algorithm %q", expectedDigest, digestAlgorithm)
 	}
 	digester = digestAlgorithm.Digester()
diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
index 6f01cf5c..60ea92aa 100644
--- a/vendor/github.com/containers/image/v5/copy/manifest.go
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -6,8 +6,10 @@ import (
 	"fmt"
 	"strings"

+	internalManifest "github.com/containers/image/v5/internal/manifest"
 	"github.com/containers/image/v5/internal/set"
 	"github.com/containers/image/v5/manifest"
+	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
@@ -19,8 +21,8 @@ import (
 // Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
 var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
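Several hunks in this update compare compression formats by BaseVariantName() rather than Name(). The point is that zstd:chunked produces byte-compatible zstd data plus extra metadata, so for media-type checks it should count as zstd. A sketch using the exported compression package; the printed values assume the v5.30 naming shown in the diff:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	fmt.Println(compression.Zstd.Name())        // "zstd"
	fmt.Println(compression.ZstdChunked.Name()) // "zstd:chunked"
	// The base variant is what media-type compatibility checks care about:
	fmt.Println(compression.ZstdChunked.BaseVariantName()) // "zstd"
}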
-// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption.
-var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest}
+// allManifestMIMETypes lists all possible manifest MIME types.
+var allManifestMIMETypes = []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType}

 // orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
 type orderedSet struct {
@@ -51,9 +53,10 @@ type determineManifestConversionInputs struct {

 	destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()

-	forceManifestMIMEType      string // User’s choice of forced manifest MIME type
-	requiresOCIEncryption      bool   // Restrict to manifest formats that can support OCI encryption
-	cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
+	forceManifestMIMEType      string                      // User’s choice of forced manifest MIME type
+	requestedCompressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user _explicitly_ requested one.
+	requiresOCIEncryption      bool                        // Restrict to manifest formats that can support OCI encryption
+	cannotModifyManifestReason string                      // The reason the manifest cannot be modified, or an empty string if it can
 }

 // manifestConversionPlan contains the decisions made by determineManifestConversion.
@@ -79,42 +82,68 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
 	if in.forceManifestMIMEType != "" {
 		destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
 	}
 	if len(destSupportedManifestMIMETypes) == 0 {
-		if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) {
-			return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
-				preferredMIMEType:       srcType,
-				otherMIMETypeCandidates: []string{},
-			}, nil
-		}
-		destSupportedManifestMIMETypes = ociEncryptionMIMETypes
+		destSupportedManifestMIMETypes = allManifestMIMETypes
 	}
+
+	restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat)
 	supportedByDest := set.New[string]()
 	for _, t := range destSupportedManifestMIMETypes {
-		if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
-			supportedByDest.Add(t)
+		if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) {
+			continue
+		}
+		if restrictiveCompressionRequired && !internalManifest.MIMETypeSupportsCompressionAlgorithm(t, *in.requestedCompressionFormat) {
+			continue
 		}
+		supportedByDest.Add(t)
 	}
 	if supportedByDest.Empty() {
-		if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes
+		if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by allManifestMIMETypes
 			return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty")
 		}
-		// We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved.
-		if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption.
-			return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting")
-		}
+		// We know, and have verified, that destSupportedManifestMIMETypes is not empty, so some filtering of supported MIME types must have been involved.
+		// destSupportedManifestMIMETypes has three possible origins:
 		if in.forceManifestMIMEType != "" { // 1. forceManifestType specified
-			return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption",
-				in.forceManifestMIMEType)
+			switch {
+			case in.requiresOCIEncryption && restrictiveCompressionRequired:
+				return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required together with format %s, which does not support both",
+					in.requestedCompressionFormat.Name(), in.forceManifestMIMEType)
+			case in.requiresOCIEncryption:
+				return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption",
+					in.forceManifestMIMEType)
+			case restrictiveCompressionRequired:
+				return manifestConversionPlan{}, fmt.Errorf("compression using %s required together with format %s, which does not support it",
+					in.requestedCompressionFormat.Name(), in.forceManifestMIMEType)
+			default:
+				return manifestConversionPlan{}, errors.New("internal error: forceManifestMIMEType was rejected for an unknown reason")
+			}
+		}
+		if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen allManifestMIMETypes
+			if !restrictiveCompressionRequired {
+				// Coverage: This should never happen.
+				// If we have not rejected for compression reasons, we must have rejected due to encryption, but
+				// allManifestMIMETypes includes OCI, which supports encryption.
+				return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
+			}
+			// This can legitimately happen when the user asks for completely unsupported formats like Bzip2 or Xz.
+			return manifestConversionPlan{}, fmt.Errorf("compression using %s required, but none of the known manifest formats support it", in.requestedCompressionFormat.Name())
 		}
-		if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes
-			// Coverage: This should never happen, ociEncryptionMIMETypes all support encryption
-			return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
+		// 3.
destination accepts a restricted list of mime types + destMIMEList := strings.Join(destSupportedManifestMIMETypes, ", ") + switch { + case in.requiresOCIEncryption && restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required but the destination only supports MIME types [%s], none of which support both", + in.requestedCompressionFormat.Name(), destMIMEList) + case in.requiresOCIEncryption: + return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", + destMIMEList) + case restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s required but the destination only supports MIME types [%s], none of which support it", + in.requestedCompressionFormat.Name(), destMIMEList) + default: // Coverage: This should never happen, we only filter for in.requiresOCIEncryption || restrictiveCompressionRequired + return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and we are neither encrypting nor requiring a restrictive compression algorithm") } - // 3. destination does not support encryption. - return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", - strings.Join(destSupportedManifestMIMETypes, ", ")) } // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. @@ -156,7 +185,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest } logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) - if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen. + if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen. return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types") } res := manifestConversionPlan{ diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index f252e347..a219b58b 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -38,6 +38,7 @@ type instanceCopy struct { // Fields which can be used by callers when operation // is `instanceCopyClone` + cloneArtifactType string cloneCompressionVariant OptionCompressionVariant clonePlatform *imgspecv1.Platform cloneAnnotations map[string]string @@ -142,6 +143,7 @@ func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest. 
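The rewritten error handling above only has to explain why supportedByDest came out empty; the filtering itself is a simple intersection. A self-contained model of that filter (simplified names, not the vendored code), using the fact that among the formats in allManifestMIMETypes only the OCI manifest can represent zstd layers:

package main

import "fmt"

const (
	ociManifest = "application/vnd.oci.image.manifest.v1+json"
	dockerV2S2  = "application/vnd.docker.distribution.manifest.v2+json"
)

// supportsZstd stands in for MIMETypeSupportsCompressionAlgorithm(mt, zstd).
func supportsZstd(mimeType string) bool { return mimeType == ociManifest }

func main() {
	destSupported := []string{dockerV2S2} // the destination only accepts docker v2s2
	var supportedByDest []string
	for _, mt := range destSupported {
		if supportsZstd(mt) { // the user explicitly requested zstd
			supportedByDest = append(supportedByDest, mt)
		}
	}
	if len(supportedByDest) == 0 {
		// Corresponds to error case 3 above: a restricted destination list,
		// none of whose entries can represent the requested compression.
		fmt.Printf("compression using zstd required but the destination only supports MIME types %v\n", destSupported)
	}
}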
res = append(res, instanceCopy{ op: instanceCopyClone, sourceDigest: instanceDigest, + cloneArtifactType: instanceDetails.ReadOnly.ArtifactType, cloneCompressionVariant: compressionVariant, clonePlatform: instanceDetails.ReadOnly.Platform, cloneAnnotations: maps.Clone(instanceDetails.ReadOnly.Annotations), @@ -268,6 +270,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, AddDigest: updated.manifestDigest, AddSize: int64(len(updated.manifest)), AddMediaType: updated.manifestMIMEType, + AddArtifactType: instance.cloneArtifactType, AddPlatform: instance.clonePlatform, AddAnnotations: instance.cloneAnnotations, AddCompressionAlgorithms: updated.compressionAlgorithms, diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go index ce078234..3ac93a9d 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_bars.go +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "time" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" @@ -148,13 +149,14 @@ type blobChunkAccessorProxy struct { // The readers must be fully consumed, in the order they are returned, before blocking // to read the next chunk. func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + start := time.Now() rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) if err == nil { total := int64(0) for _, c := range chunks { total += int64(c.Length) } - s.bar.IncrInt64(total) + s.bar.EwmaIncrInt64(total, time.Since(start)) } return rc, errs, err } diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index 67ca43f7..e1a43f75 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -20,6 +20,7 @@ import ( compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" + chunkedToc "github.com/containers/storage/pkg/chunked/toc" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" @@ -32,6 +33,7 @@ type imageCopier struct { c *copier manifestUpdates *types.ManifestUpdateOptions src *image.SourcedImage + manifestConversionPlan manifestConversionPlan diffIDsAreNeeded bool cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can canSubstituteBlobs bool @@ -135,7 +137,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar c: c, manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, src: src, - // diffIDsAreNeeded is computed later + // manifestConversionPlan and diffIDsAreNeeded are computed later cannotModifyManifestReason: cannotModifyManifestReason, requireCompressionFormatMatch: opts.requireCompressionFormatMatch, } @@ -163,10 +165,11 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil - manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{ + ic.manifestConversionPlan, err = 
determineManifestConversion(determineManifestConversionInputs{ srcMIMEType: ic.src.ManifestMIMEType, destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), forceManifestMIMEType: c.options.ForceManifestMIMEType, + requestedCompressionFormat: ic.compressionFormat, requiresOCIEncryption: destRequiresOciEncryption, cannotModifyManifestReason: ic.cannotModifyManifestReason, }) @@ -177,8 +180,8 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based // on the expected destination format. - if manifestConversionPlan.preferredMIMETypeNeedsConversion { - ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType + if ic.manifestConversionPlan.preferredMIMETypeNeedsConversion { + ic.manifestUpdates.ManifestMIMEType = ic.manifestConversionPlan.preferredMIMEType } // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. @@ -217,11 +220,11 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) wipResult := copySingleImageResult{ manifest: manifestBytes, - manifestMIMEType: manifestConversionPlan.preferredMIMEType, + manifestMIMEType: ic.manifestConversionPlan.preferredMIMEType, manifestDigest: manifestDigest, } if err != nil { - logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err) + logrus.Debugf("Writing manifest using preferred type %s failed: %v", ic.manifestConversionPlan.preferredMIMEType, err) // … if it fails, and the failure is either because the manifest is rejected by the registry, or // because we failed to create a manifest of the specified type because the specific manifest type // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may @@ -230,13 +233,13 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError isManifestRejected := errors.As(err, &manifestTypeRejectedError) isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError) - if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 { + if (!isManifestRejected && !isCompressionIncompatible) || len(ic.manifestConversionPlan.otherMIMETypeCandidates) == 0 { // We don’t have other options. // In principle the code below would handle this as well, but the resulting error message is fairly ugly. // Don’t bother the user with MIME types if we have no choice. return copySingleImageResult{}, err } - // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType. + // If the original MIME type is acceptable, determineManifestConversion always uses it as ic.manifestConversionPlan.preferredMIMEType. // So if we are here, we will definitely be trying to convert the manifest. // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. 
@@ -245,8 +248,8 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar } // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. - errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)} - for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates { + errs := []string{fmt.Sprintf("%s(%v)", ic.manifestConversionPlan.preferredMIMEType, err)} + for _, manifestMIMEType := range ic.manifestConversionPlan.otherMIMETypeCandidates { logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) ic.manifestUpdates.ManifestMIMEType = manifestMIMEType attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) @@ -380,7 +383,11 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, compressionAlgos := set.New[string]() for _, srcInfo := range ic.src.LayerInfos() { - if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil { + _, c, err := compressionEditsFromBlobInfo(srcInfo) + if err != nil { + return nil, err + } + if c != nil { compressionAlgos.Add(c.Name()) } } @@ -633,17 +640,29 @@ type diffIDResult struct { err error } -func compressionAlgorithmFromMIMEType(srcInfo types.BlobInfo) *compressiontypes.Algorithm { +// compressionEditsFromBlobInfo returns a (CompressionOperation, CompressionAlgorithm) value pair suitable +// for types.BlobInfo. +func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm, error) { // This MIME type → compression mapping belongs in manifest-specific code in our manifest // package (but we should preferably replace/change UpdatedImage instead of productizing // this workaround). switch srcInfo.MediaType { case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip: - return &compression.Gzip + return types.PreserveOriginal, &compression.Gzip, nil case imgspecv1.MediaTypeImageLayerZstd: - return &compression.Zstd + tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return types.PreserveOriginal, nil, err + } + if tocDigest != nil { + return types.PreserveOriginal, &compression.ZstdChunked, nil + } + return types.PreserveOriginal, &compression.Zstd, nil + case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer: + return types.Decompress, nil, nil + default: + return types.PreserveOriginal, nil, nil } - return nil } // copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, @@ -657,8 +676,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // which uses the compression information to compute the updated MediaType values. // (Sadly UpdatedImage() is documented to not update MediaTypes from // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.) 
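The fallback above collects one formatted error per attempted manifest type and only reports them all if every candidate fails. The shape of that loop, reduced to its essentials (illustrative stand-ins, not the vendored code):

package main

import (
	"errors"
	"fmt"
)

// tryUpload stands in for copyUpdatedConfigAndManifest: here only docker v2s2
// succeeds, so the fallback loop has work to do.
func tryUpload(mimeType string) error {
	if mimeType != "application/vnd.docker.distribution.manifest.v2+json" {
		return errors.New("manifest type rejected by registry")
	}
	return nil
}

func main() {
	preferred := "application/vnd.oci.image.manifest.v1+json"
	others := []string{"application/vnd.docker.distribution.manifest.v2+json"}

	err := tryUpload(preferred)
	if err == nil {
		fmt.Println("uploaded as", preferred)
		return
	}
	// errs doubles as the failure report, exactly like the vendored loop.
	errs := []string{fmt.Sprintf("%s(%v)", preferred, err)}
	for _, mt := range others {
		if err := tryUpload(mt); err != nil {
			errs = append(errs, fmt.Sprintf("%s(%v)", mt, err))
			continue
		}
		fmt.Println("uploaded as", mt)
		return
	}
	fmt.Println("uploading manifest failed, attempted formats:", errs)
}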
- if srcInfo.CompressionAlgorithm == nil { - srcInfo.CompressionAlgorithm = compressionAlgorithmFromMIMEType(srcInfo) + if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil { + op, algo, err := compressionEditsFromBlobInfo(srcInfo) + if err != nil { + return types.BlobInfo{}, "", err + } + srcInfo.CompressionOperation = op + srcInfo.CompressionAlgorithm = algo } ic.c.printCopyInfo("blob", srcInfo) @@ -681,26 +705,33 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v", srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression) canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType) - // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm - // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing - // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause - // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. - // Fixing that will probably require passing more information to TryReusingBlob() than the current version of - // the ImageDestination interface lets us pass in. + var requiredCompression *compressiontypes.Algorithm - var originalCompression *compressiontypes.Algorithm if ic.requireCompressionFormatMatch { requiredCompression = ic.compressionFormat - originalCompression = srcInfo.CompressionAlgorithm } + + var tocDigest digest.Digest + + // Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest. 
+ d, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return types.BlobInfo{}, "", err + } + if d != nil { + tocDigest = *d + } + reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ - Cache: ic.c.blobInfoCache, - CanSubstitute: canSubstitute, - EmptyLayer: emptyLayer, - LayerIndex: &layerIndex, - SrcRef: srcRef, - RequiredCompression: requiredCompression, - OriginalCompression: originalCompression, + Cache: ic.c.blobInfoCache, + CanSubstitute: canSubstitute, + EmptyLayer: emptyLayer, + LayerIndex: &layerIndex, + SrcRef: srcRef, + PossibleManifestFormats: append([]string{ic.manifestConversionPlan.preferredMIMEType}, ic.manifestConversionPlan.otherMIMETypeCandidates...), + RequiredCompression: requiredCompression, + OriginalCompression: srcInfo.CompressionAlgorithm, + TOCDigest: tocDigest, }) if err != nil { return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err) @@ -708,7 +739,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) func() { // A scope for defer - bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists") + label := "skipped: already exists" + if reusedBlob.MatchedByTOCDigest { + label = "skipped: already exists (found by TOC)" + } + bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label) defer bar.Abort(false) bar.mark100PercentComplete() }() @@ -741,7 +776,10 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to wrapped: ic.c.rawSource, bar: bar, } - uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) + uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{ + Cache: ic.c.blobInfoCache, + LayerIndex: layerIndex, + }) if err == nil { if srcInfo.Size != -1 { refill := srcInfo.Size - bar.Current() diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 222723a8..9d921924 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -190,7 +190,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
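The TOC lookup above is what lets copyLayer find chunked layers that containers/storage indexes by TOC digest rather than by blob digest. In isolation the call looks like the sketch below; the annotation key shown is the zstd:chunked checksum annotation used by containers/storage, and the digest value is a dummy:

package main

import (
	"fmt"

	chunkedToc "github.com/containers/storage/pkg/chunked/toc"
)

func main() {
	annotations := map[string]string{
		// Illustrative: a syntactically valid but meaningless digest.
		"io.github.containers.zstd-chunked.manifest-checksum": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
	}
	d, err := chunkedToc.GetTOCDigest(annotations)
	if err != nil {
		fmt.Println("malformed TOC annotation:", err)
		return
	}
	if d == nil {
		fmt.Println("not a chunked layer; reuse falls back to the blob digest")
		return
	}
	fmt.Println("chunked layer, TOC digest:", *d)
}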
func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } if info.Digest == "" { diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 6ce8f700..a60ec563 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -978,13 +978,10 @@ func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, t // This function can return nil reader when no url is supported by this function. In this case, the caller // should fallback to fetch the non-external blob (i.e. pull from the registry). func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { - var ( - resp *http.Response - err error - ) if len(urls) == 0 { return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") } + var remoteErrors []error for _, u := range urls { blobURL, err := url.Parse(u) if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") { @@ -993,24 +990,28 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R // NOTE: we must not authenticate on additional URLs as those // can be abused to leak credentials or tokens. Please // refer to CVE-2020-15157 for more information. - resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil) - if err == nil { - if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) - logrus.Debug(err) - resp.Body.Close() - continue - } - break + resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil) + if err != nil { + remoteErrors = append(remoteErrors, err) + continue + } + if resp.StatusCode != http.StatusOK { + err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + remoteErrors = append(remoteErrors, err) + logrus.Debug(err) + resp.Body.Close() + continue } + return resp.Body, getBlobSize(resp), nil } - if resp == nil && err == nil { + if remoteErrors == nil { return nil, 0, nil // fallback to non-external blob } - if err != nil { - return nil, 0, err + err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0]) + for _, e := range remoteErrors[1:] { + err = fmt.Errorf("%s, %w", err, e) } - return resp.Body, getBlobSize(resp), nil + return nil, 0, err } func getBlobSize(resp *http.Response) int64 { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index a9a36f0a..877d11b7 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -27,6 +27,7 @@ import ( "github.com/containers/image/v5/internal/uploadreader" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" 
"github.com/docker/distribution/registry/api/errcode" v2 "github.com/docker/distribution/registry/api/v2" @@ -311,6 +312,13 @@ func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info t return false, private.ReusedBlob{}, nil } +func optionalCompressionName(algo *compressiontypes.Algorithm) string { + if algo != nil { + return algo.Name() + } + return "nil" +} + // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. @@ -321,7 +329,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } - if impl.OriginalBlobMatchesRequiredCompression(options) { + if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { // First, check whether the blob happens to already exist at the destination. haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache) if err != nil { @@ -331,11 +339,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, return true, reusedInfo, nil } } else { - requiredCompression := "nil" - if options.OriginalCompression != nil { - requiredCompression = options.OriginalCompression.Name() - } - logrus.Debugf("Ignoring exact blob match case due to compression mismatch ( %s vs %s )", options.RequiredCompression.Name(), requiredCompression) + logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v", + optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats) } // Then try reusing blobs from other locations. 
@@ -355,15 +360,13 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 				continue
 			}
 		}
-		if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
-			requiredCompression := "nil"
-			if compressionAlgorithm != nil {
-				requiredCompression = compressionAlgorithm.Name()
-			}
+		if !impl.CandidateMatchesTryReusingBlobOptions(options, compressionAlgorithm) {
 			if !candidate.UnknownLocation {
-				logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
+				logrus.Debugf("Ignoring candidate blob %s in %s, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), candidateRepo.Name(),
+					optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
 			} else {
-				logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression)
+				logrus.Debugf("Ignoring candidate blob %s with no known location, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(),
+					optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
 			}
 			continue
 		}
diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
index 7507d855..b44f4ca1 100644
--- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
+++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go
@@ -129,7 +129,7 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
 // If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
-	if !impl.OriginalBlobMatchesRequiredCompression(options) {
+	if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
 		return false, private.ReusedBlob{}, nil
 	}
 	if err := d.archive.lock(); err != nil {
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
index 429d6826..4d3858ab 100644
--- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
@@ -36,7 +36,7 @@ type BlobInfoCache2 interface {
 	// that could possibly be reused within the specified (transport scope) (if they still
 	// exist, which is not guaranteed).
 	//
-	// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
 	// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
 	// up variants of the blob which have the same uncompressed digest.
 	//
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
index 5d28b3e7..553569a0 100644
--- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
+++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
@@ -1,25 +1,42 @@
 package impl

 import (
+	"github.com/containers/image/v5/internal/manifest"
 	"github.com/containers/image/v5/internal/private"
 	compression "github.com/containers/image/v5/pkg/compression/types"
+	"golang.org/x/exp/slices"
 )

-// BlobMatchesRequiredCompression validates if compression is required by the caller while selecting a blob, if it is required
+// CandidateMatchesTryReusingBlobOptions validates if compression is required by the caller while selecting a blob; if it is required,
 // then the function performs a match against the compression requested by the caller and compression of existing blob
 // (which can be nil to represent uncompressed or unknown)
-func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
-	if options.RequiredCompression == nil {
-		return true // no requirement imposed
+func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
+	if options.RequiredCompression != nil {
+		if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
+			// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
+			// The caller must re-compress to build those annotations.
+			return false
+		}
+		if candidateCompression == nil ||
+			(options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
+			return false
+		}
 	}
-	if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
-		// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
-		// The caller must re-compress to build those annotations.
- return false + + // For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”; + // and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1), + // so don’t impose any restrictions if candidateCompression == nil + if options.PossibleManifestFormats != nil && candidateCompression != nil { + if !slices.ContainsFunc(options.PossibleManifestFormats, func(mt string) bool { + return manifest.MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression) + }) { + return false + } } - return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name()) + + return true } -func OriginalBlobMatchesRequiredCompression(opts private.TryReusingBlobOptions) bool { - return BlobMatchesRequiredCompression(opts, opts.OriginalCompression) +func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool { + return CandidateMatchesTryReusingBlobOptions(opts, opts.OriginalCompression) } diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go index 0dc6bd5a..bbb53c19 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" ) @@ -39,7 +38,7 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { +func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) } diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index 17e1870c..cdd3c5e5 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -28,7 +28,7 @@ type wrapped struct { // // NOTE: The returned API MUST NOT be a public interface (it can be either just a struct // with public methods, or perhaps a private interface), so that we can add methods -// without breaking any external implementors of a public interface. +// without breaking any external implementers of a public interface. 
func FromPublic(dest types.ImageDestination) private.ImageDestination { if dest2, ok := dest.(private.ImageDestination); ok { return dest2 diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go index 886b4e83..f0d1d042 100644 --- a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go @@ -27,7 +27,7 @@ type wrapped struct { // // NOTE: The returned API MUST NOT be a public interface (it can be either just a struct // with public methods, or perhaps a private interface), so that we can add methods -// without breaking any external implementors of a public interface. +// without breaking any external implementers of a public interface. func FromPublic(src types.ImageSource) private.ImageSource { if src2, ok := src.(private.ImageSource); ok { return src2 diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go index 189f1a71..1d60da75 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go @@ -73,6 +73,7 @@ type ListUpdate struct { Platform *imgspecv1.Platform Annotations map[string]string CompressionAlgorithmNames []string + ArtifactType string } } @@ -101,6 +102,7 @@ type ListEdit struct { AddDigest digest.Digest AddSize int64 AddMediaType string + AddArtifactType string AddPlatform *imgspecv1.Platform AddAnnotations map[string]string AddCompressionAlgorithms []compression.Algorithm diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go index 1dbcc141..c77db752 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go @@ -3,6 +3,7 @@ package manifest import ( "encoding/json" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/libtrust" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -14,7 +15,7 @@ import ( const ( // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature + // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" @@ -165,3 +166,29 @@ func NormalizedMIMEType(input string) string { return DockerV2Schema1SignedMediaType } } + +// CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values. +func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool { + // Compare the discussion about BaseVariantName in MIMETypeSupportsCompressionAlgorithm(). 
+ switch algo.Name() { + case compressiontypes.GzipAlgorithmName: + return true + default: + return false + } +} + +// MIMETypeSupportsCompressionAlgorithm returns true if mimeType can represent algo. +func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes.Algorithm) bool { + if CompressionAlgorithmIsUniversallySupported(algo) { + return true + } + // This does not use BaseVariantName: Plausibly a manifest format might support zstd but not have annotation fields. + // The logic might have to be more complex (and more ad-hoc) if more manifest formats, with more capabilities, emerge. + switch algo.Name() { + case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName: + return mimeType == imgspecv1.MediaTypeImageManifest + default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere + return false + } +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index d8d06513..829852a8 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -61,6 +61,7 @@ func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate ret.ReadOnly.Platform = manifest.Platform ret.ReadOnly.Annotations = manifest.Annotations ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations) + ret.ReadOnly.ArtifactType = manifest.ArtifactType return ret, nil } } @@ -102,7 +103,7 @@ func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, an *annotationsMap = map[string]string{} } for _, algo := range compressionAlgorithms { - switch algo.Name() { + switch algo.BaseVariantName() { case compression.ZstdAlgorithmName: (*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue default: @@ -157,11 +158,13 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { } addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations) addedEntries = append(addedEntries, imgspecv1.Descriptor{ - MediaType: editInstance.AddMediaType, - Size: editInstance.AddSize, - Digest: editInstance.AddDigest, - Platform: editInstance.AddPlatform, - Annotations: annotations}) + MediaType: editInstance.AddMediaType, + ArtifactType: editInstance.AddArtifactType, + Size: editInstance.AddSize, + Digest: editInstance.AddDigest, + Platform: editInstance.AddPlatform, + Annotations: annotations, + }) default: return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) } @@ -299,12 +302,13 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation platform = &platformCopy } m := imgspecv1.Descriptor{ - MediaType: component.MediaType, - Size: component.Size, - Digest: component.Digest, - URLs: slices.Clone(component.URLs), - Annotations: maps.Clone(component.Annotations), - Platform: platform, + MediaType: component.MediaType, + ArtifactType: component.ArtifactType, + Size: component.Size, + Digest: component.Digest, + URLs: slices.Clone(component.URLs), + Annotations: maps.Clone(component.Annotations), + Platform: platform, } index.Manifests[i] = m } diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index 3ba0e408..94002d6d 
100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -25,6 +25,7 @@ import ( "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" "golang.org/x/exp/slices" ) @@ -82,15 +83,40 @@ func getCPUVariantWindows(arch string) string { func getCPUVariantArm() string { variant, err := getCPUInfo("Cpu architecture") if err != nil { + logrus.Errorf("Couldn't get cpu architecture: %v", err) return "" } - // TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286) switch strings.ToLower(variant) { case "8", "aarch64": variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + case "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": variant = "v7" + case "7": + // handle RPi Zero variant mismatch due to wrong variant from kernel + // https://github.com/containerd/containerd/pull/4530 + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + // https://github.com/moby/moby/pull/36121#issuecomment-398328286 + model, err := getCPUInfo("model name") + if err != nil { + logrus.Errorf("Couldn't get cpu model name, it may be the corner case where variant is 6: %v", err) + return "" + } + // model name is NOT a value provided by the CPU; it is another outcome of Linux CPU detection, + // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L178C35-L178C35 + // (matching happens based on value + mask at https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L273-L274 ) + // ARM CPU ID starts with a “main” ID register https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/System-Control-Registers-in-a-VMSA-implementation/VMSA-System-control-registers-descriptions--in-register-order/MIDR--Main-ID-Register--VMSA?lang=en , + // but the ARMv6/ARMv7 differences are not a single dimension, https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-CPUID-Identification-Scheme?lang=en . + // The Linux "cpu architecture" is determined by a “memory model” feature. + // + // So, the "armv6-compatible" check basically checks for a "v6 or v7 CPU, but not one found listed as a known v7 one in the .proc.info.init tables of + // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v7.S . + if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + logrus.Debugf("Detected corner case, setting cpu variant to v6") + variant = "v6" + } else { + variant = "v7" + } case "6", "6tej": variant = "v6" case "5", "5t", "5te", "5tej": diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index 95d561fc..562adbea 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -55,7 +55,7 @@ type ImageDestinationInternalOnly interface { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. 
- PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error) + PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error) // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). @@ -100,6 +100,12 @@ type PutBlobOptions struct { LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. } +// PutBlobPartialOptions are used in PutBlobPartial. +type PutBlobPartialOptions struct { + Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. + LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs) +} + // TryReusingBlobOptions are used in TryReusingBlobWithOptions. type TryReusingBlobOptions struct { Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. @@ -112,11 +118,13 @@ type TryReusingBlobOptions struct { // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers // if they use internal/imagedestination/impl.Compat; // in that case, they will all be consistently zero-valued. - RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go - OriginalCompression *compression.Algorithm // Must be set if RequiredCompression is set; can be set to nil to indicate “uncompressed” or “unknown”. - EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. - LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. - SrcRef reference.Named // A reference to the source image that contains the input blob. + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. + LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. + SrcRef reference.Named // A reference to the source image that contains the input blob. + PossibleManifestFormats []string // A set of possible manifest formats; at least one should support the reused layer blob. + RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go + OriginalCompression *compression.Algorithm // May be nil to indicate “uncompressed” or “unknown”. + TOCDigest digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest. } // ReusedBlob is information about a blob reused in a destination. @@ -128,6 +136,8 @@ type ReusedBlob struct { // a differently-compressed blob. CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A + + MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes. } // ImageSourceChunk is a portion of a blob. 
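The reuse-matching rules introduced above are easier to see with concrete values. Below is a minimal standalone sketch; since impl and private are internal c/image packages, the helper restates the documented RequiredCompression rules rather than calling CandidateMatchesTryReusingBlobOptions directly (the algo type and requiredCompressionMatches are illustrative stand-ins, not c/image APIs):

package main

import "fmt"

// algo is a stand-in for compression.Algorithm (Name/BaseVariantName only).
type algo struct{ name, base string }

var (
	gzip        = algo{"gzip", "gzip"}
	zstd        = algo{"zstd", "zstd"}
	zstdChunked = algo{"zstd:chunked", "zstd"}
)

// requiredCompressionMatches restates the RequiredCompression branch of
// CandidateMatchesTryReusingBlobOptions; candidate == nil means the candidate
// blob's compression is uncompressed or unknown.
func requiredCompressionMatches(required, candidate *algo) bool {
	if required == nil {
		return true // no requirement imposed
	}
	if required.name == "zstd:chunked" {
		// The annotations needed to consume chunked blobs are not recorded, so reuse is refused.
		return false
	}
	return candidate != nil &&
		(required.name == candidate.name || required.name == candidate.base)
}

func main() {
	fmt.Println(requiredCompressionMatches(&zstd, &zstdChunked)) // true: zstd:chunked data is valid zstd data
	fmt.Println(requiredCompressionMatches(&zstdChunked, &zstd)) // false: chunked annotations are unavailable
	fmt.Println(requiredCompressionMatches(&gzip, nil))          // false: candidate compression is unknown
}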
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 1bdcf3d3..de462811 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -55,7 +55,7 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType if variants != nil { name := mtsUncompressed if algorithm != nil { - name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark() + name = algorithm.BaseVariantName() } if res, ok := variants[name]; ok { if res != mtsUnsupportedMIMEType { diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index a80af701..76281557 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -10,6 +10,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/set" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/regexp" "github.com/docker/docker/api/types/versions" @@ -142,6 +143,15 @@ func (m *Schema1) LayerInfos() []LayerInfo { return layers } +const fakeSchema1MIMEType = DockerV2Schema2LayerMediaType // Used only in schema1CompressionMIMETypeSets +var schema1CompressionMIMETypeSets = []compressionMIMETypeSet{ + { + mtsUncompressed: fakeSchema1MIMEType, + compressiontypes.GzipAlgorithmName: fakeSchema1MIMEType, + compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType, + }, +} + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. @@ -150,6 +160,11 @@ func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { } m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) for i, info := range layerInfos { + // There are no MIME types in schema1, but we do a “conversion” here to reject unsupported compression algorithms, + // in a way that is consistent with the other schema implementations. + if _, err := updatedMIMEType(schema1CompressionMIMETypeSets, fakeSchema1MIMEType, info); err != nil { + return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err) + } // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. // So, we don't bother recomputing the IDs in m.History.V1Compatibility. 
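The schema1 hunk above reuses the generic compressionMIMETypeSet machinery with a placeholder MIME type, purely so that UpdateLayerInfos can reject compression algorithms that schema1 cannot express. A rough sketch of the resulting support matrix; MIMETypeSupportsCompressionAlgorithm is internal to c/image, so the supports helper below merely restates its switch logic:

package main

import "fmt"

const (
	ociManifest   = "application/vnd.oci.image.manifest.v1+json"
	dockerSchema2 = "application/vnd.docker.distribution.manifest.v2+json"
	dockerSchema1 = "application/vnd.docker.distribution.manifest.v1+json"
)

// supports restates manifest.MIMETypeSupportsCompressionAlgorithm: gzip is
// universally supported; zstd (and its zstd:chunked variant) is representable
// only in OCI image manifests; bzip2 and xz are supported nowhere.
func supports(mimeType, algoName string) bool {
	switch algoName {
	case "gzip":
		return true
	case "zstd", "zstd:chunked":
		return mimeType == ociManifest
	default:
		return false
	}
}

func main() {
	fmt.Println(supports(ociManifest, "zstd"))   // true
	fmt.Println(supports(dockerSchema2, "zstd")) // false
	fmt.Println(supports(dockerSchema1, "gzip")) // true: schema1 keeps working with gzip layers
	fmt.Println(supports(dockerSchema1, "zstd")) // false: this is what the fake-MIME-type table enforces
}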
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 959aac93..828b8da0 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -16,7 +16,7 @@ import ( const ( // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature + // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index a85641c3..548994ff 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -235,7 +235,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type } // ImageID computes an ID which can uniquely identify this image by its contents. -func (m *OCI1) ImageID([]digest.Digest) (string, error) { +func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) { // The way m.Config.Digest “uniquely identifies” an image is // by containing RootFS.DiffIDs, which identify the layers of the image. // For non-image artifacts, we can’t expect the config to change diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index 8386c47a..a3eb5d7a 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -6,13 +6,13 @@ import ( "io" "os" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) @@ -119,8 +119,8 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions.
-func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { - return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { + return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination @@ -169,10 +169,15 @@ func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedTopleve // tar converts the directory at src and saves it to dst func tarDirectory(src, dst string) error { // input is a stream of bytes from the archive of the directory at path - input, err := archive.Tar(src, archive.Uncompressed) + input, err := archive.TarWithOptions(src, &archive.TarOptions{ + Compression: archive.Uncompressed, + // Don’t include the data about the user account this code is running under. + ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, + }) if err != nil { return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err) } + defer input.Close() // creates the tar file outFile, err := os.Create(dst) diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 100d1676..305d8c9c 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -173,7 +173,7 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } if info.Digest == "" { diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 50a5339e..656f4518 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -12,7 +12,6 @@ import ( "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" @@ -128,8 +127,8 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. 
-func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { - return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { + return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index d00a0cdf..228af90c 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -335,7 +335,7 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } if d.repo == nil { diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go index 9bda0851..663df11d 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go @@ -9,7 +9,6 @@ import ( "path/filepath" "sync" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/private" @@ -227,8 +226,8 @@ func (d *blobCacheDestination) SupportsPutBlobPartial() bool { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { - return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { + return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination @@ -237,7 +236,7 @@ func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor // If the blob has been successfully reused, returns (true, info, nil). 
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } present, reusedInfo, err := d.destination.TryReusingBlobWithOptions(ctx, info, options) diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 97562687..470fca0c 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -91,11 +91,11 @@ func min(a, b int) int { // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the // number of entries to limit for known and unknown location separately, only to make testing simpler. -// TODO: following function is not destructive any more in the nature instead priortized result is actually copies of the original +// TODO: following function is not destructive any more in the nature instead prioritized result is actually copies of the original // candidate set, so In future we might wanna re-name this public API and remove the destructive prefix. func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 { // split unknown candidates and known candidates - // and limit them seperately. + // and limit them separately. var knownLocationCandidates []CandidateWithTime var unknownLocationCandidates []CandidateWithTime // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index cfad16b2..16193db9 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -184,7 +184,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused // within the specified (transport scope) (if they still exist, which is not guaranteed). // -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. 
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go index 2b446a61..d8bde2fa 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go @@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) { // dbTransaction calls fn within a read-write transaction in db. func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) { - // Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive dicussion. + // Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion. var zeroRes T // A zero value of T @@ -496,7 +496,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). // -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look // up variants of the blob which have the same uncompressed digest. // diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go index 4443dda7..b83a257e 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -19,19 +19,19 @@ type Algorithm = types.Algorithm var ( // Gzip compression. - Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName, + Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, "", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor) // Bzip2 compression. - Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName, + Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, "", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor) // Xz compression. - Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName, + Xz = internal.NewAlgorithm(types.XzAlgorithmName, "", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) // Zstd compression. - Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName, + Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, "", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) - // ZstdChunked is a Zstd compression with chunk metadta which allows random access to individual files. - ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */ + // ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files. 
+ ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, nil, ZstdDecompressor, compressor.ZstdCompressor) compressionAlgorithms = map[string]Algorithm{ diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go index ba619be0..d6f85274 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go @@ -12,23 +12,28 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error) // Algorithm is a compression algorithm that can be used for CompressStream. type Algorithm struct { - name string - mime string - prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection. - decompressor DecompressorFunc - compressor CompressorFunc + name string + baseVariantName string + prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection. + decompressor DecompressorFunc + compressor CompressorFunc } // NewAlgorithm creates an Algorithm instance. +// nontrivialBaseVariantName is typically "". // This function exists so that Algorithm instances can only be created by code that // is allowed to import this internal subpackage. -func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { +func NewAlgorithm(name, nontrivialBaseVariantName string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { + baseVariantName := name + if nontrivialBaseVariantName != "" { + baseVariantName = nontrivialBaseVariantName + } return Algorithm{ - name: name, - mime: mime, - prefix: prefix, - decompressor: decompressor, - compressor: compressor, + name: name, + baseVariantName: baseVariantName, + prefix: prefix, + decompressor: decompressor, + compressor: compressor, } } @@ -37,10 +42,11 @@ func (c Algorithm) Name() string { return c.name } -// InternalUnstableUndocumentedMIMEQuestionMark ??? -// DO NOT USE THIS anywhere outside of c/image until it is properly documented. -func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string { - return c.mime +// BaseVariantName returns the name of the “base variant” of the compression algorithm. +// It is either equal to Name() of the same algorithm, or equal to Name() of some other Algorithm (the “base variant”). +// This supports a single level of “is-a” relationship between compression algorithms, e.g. where "zstd:chunked" data is valid "zstd" data. +func (c Algorithm) BaseVariantName() string { + return c.baseVariantName } // AlgorithmCompressor returns the compressor field of algo. 
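pkg/compression is a public package, so the new BaseVariantName accessor can be exercised directly. A small usage sketch, assuming the c/image version vendored here (v5.30.0, where the method is introduced):

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	// zstd:chunked is a variant of zstd: the stream is valid zstd data,
	// plus chunk metadata that enables random access to individual files.
	fmt.Println(compression.ZstdChunked.Name())            // "zstd:chunked"
	fmt.Println(compression.ZstdChunked.BaseVariantName()) // "zstd"

	// Algorithms without a nontrivial base variant report themselves.
	fmt.Println(compression.Zstd.BaseVariantName()) // "zstd"
	fmt.Println(compression.Gzip.BaseVariantName()) // "gzip"
}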
diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go index ef5d3df6..c11fa46a 100644 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go @@ -1,3 +1,6 @@ +//go:build !containers_image_fulcio_stub +// +build !containers_image_fulcio_stub + package signature import ( diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go new file mode 100644 index 00000000..c0b48daf --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go @@ -0,0 +1,28 @@ +//go:build containers_image_fulcio_stub +// +build containers_image_fulcio_stub + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/x509" + "errors" +) + +type fulcioTrustRoot struct { + caCertificates *x509.CertPool + oidcIssuer string + subjectEmail string +} + +func (f *fulcioTrustRoot) validate() error { + return errors.New("fulcio disabled at compile-time") +} + +func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, + untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, + untrustedPayloadBytes []byte) (crypto.PublicKey, error) { + return nil, errors.New("fulcio disabled at compile-time") + +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go index d439b5f7..50243da3 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -1,3 +1,6 @@ +//go:build !containers_image_rekor_stub +// +build !containers_image_rekor_stub + package internal import ( @@ -216,6 +219,10 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver if hashedRekordV001.Data.Hash.Algorithm == nil { return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`) } + // FIXME: Rekor 1.3.5 has added SHA-384 and SHA-512 as recognized values. + // Eventually we should support them as well; doing that cleanly would require updating to Rekor 1.3.5, which requires Go 1.21. + // Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility + // issue. if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 { return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm)) } diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go new file mode 100644 index 00000000..7c121cc2 --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go @@ -0,0 +1,15 @@ +//go:build containers_image_rekor_stub +// +build containers_image_rekor_stub + +package internal + +import ( + "crypto/ecdsa" + "time" +) + +// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data. +// Returns bundle upload time on success.
+func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { + return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time") +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 07e1d5e1..c06d6c09 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -16,7 +16,6 @@ import ( "sync/atomic" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" "github.com/containers/image/v5/internal/private" @@ -55,41 +54,61 @@ type storageImageDestination struct { stubs.ImplementsPutBlobPartial stubs.AlwaysSupportsSignatures - imageRef storageReference - directory string // Temporary directory where we store blobs until Commit() time - nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - manifestDigest digest.Digest // Valid if len(manifest) != 0 - signatures []byte // Signature contents, temporary - signatureses map[digest.Digest][]byte // Instance signature contents, temporary - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice - SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice - - // A storage destination may be used concurrently. Accesses are - // serialized via a mutex. Please refer to the individual comments - // below for details. - lock sync.Mutex + imageRef storageReference + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + manifestDigest digest.Digest // Valid if len(manifest) != 0 + untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs, valid if not nil + signatures []byte // Signature contents, temporary + signatureses map[digest.Digest][]byte // Instance signature contents, temporary + metadata storageImageMetadata // Metadata contents being built + // Mapping from layer (by index) to the associated ID in the storage. // It's protected *implicitly* since `commitLayer()`, at any given // time, can only be executed by *one* goroutine. Please refer to // `queueOrCommit()` for further details on how the single-caller // guarantee is implemented. - indexToStorageID map[int]*string - // All accesses to below data are protected by `lock` which is made - // *explicit* in the code. 
- blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes - filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them - currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) - indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image - blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer - diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output + indexToStorageID map[int]string + + // A storage destination may be used concurrently, due to HasThreadSafePutBlob. + lock sync.Mutex // Protects lockProtected + lockProtected storageImageDestinationLockProtected +} + +// storageImageDestinationLockProtected contains storageImageDestination data which might be +// accessed concurrently, due to HasThreadSafePutBlob. +// _During the concurrent TryReusingBlob/PutBlob/* calls_ (but not necessarily during the final Commit) +// users must hold storageImageDestination.lock. +type storageImageDestinationLockProtected struct { + currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) + indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image + + // In general, a layer is identified either by (compressed) digest, or by TOC digest. + // When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values + // we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not + // recompute those values for incoming layers — the point of the reuse is that we don’t need to consume the incoming layer.) + + // Layer identification: For a layer, at least one of indexToTOCDigest and blobDiffIDs must be available before commitLayer is called. + // The presence of an indexToTOCDigest is what decides how the layer is identified, i.e. which fields must be trusted. + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest + + // Layer data: Before commitLayer is called, either at least one of (diffOutputs, blobAdditionalLayer, filenames) + // should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer. + // They are looked up in the order they are mentioned above. + diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to partially-pulled layer intermediate data + blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer + // Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set. + filenames map[digest.Digest]string + // Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set. + fileSizes map[digest.Digest]int64 } // addedLayerInfo records data about a layer to use in this image.
type addedLayerInfo struct { - digest digest.Digest - emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. + digest digest.Digest // Mandatory, the digest of the layer. + emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. } // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until @@ -117,18 +136,23 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (* HasThreadSafePutBlob: true, }), - imageRef: imageRef, - directory: directory, - signatureses: make(map[digest.Digest][]byte), - blobDiffIDs: make(map[digest.Digest]digest.Digest), - blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), - fileSizes: make(map[digest.Digest]int64), - filenames: make(map[digest.Digest]string), - SignatureSizes: []int{}, - SignaturesSizes: make(map[digest.Digest][]int), - indexToStorageID: make(map[int]*string), - indexToAddedLayerInfo: make(map[int]addedLayerInfo), - diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), + imageRef: imageRef, + directory: directory, + signatureses: make(map[digest.Digest][]byte), + metadata: storageImageMetadata{ + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + }, + indexToStorageID: make(map[int]string), + lockProtected: storageImageDestinationLockProtected{ + indexToAddedLayerInfo: make(map[int]addedLayerInfo), + blobDiffIDs: make(map[digest.Digest]digest.Digest), + indexToTOCDigest: make(map[int]digest.Digest), + diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput), + blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), + filenames: make(map[digest.Digest]string), + fileSizes: make(map[digest.Digest]int64), + }, } dest.Compat = impl.AddCompat(dest) return dest, nil @@ -142,12 +166,13 @@ func (s *storageImageDestination) Reference() types.ImageReference { // Close cleans up the temporary directory and additional layer store handlers. func (s *storageImageDestination) Close() error { - for _, al := range s.blobAdditionalLayer { + // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + for _, al := range s.lockProtected.blobAdditionalLayer { al.Release() } - for _, v := range s.diffOutputs { + for _, v := range s.lockProtected.diffOutputs { if v.Target != "" { - _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target) + _ = s.imageRef.transport.store.CleanupStagedLayer(v) } } return os.RemoveAll(s.directory) @@ -227,9 +252,9 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf // Record information about the blob. s.lock.Lock() - s.blobDiffIDs[blobDigest] = diffID.Digest() - s.fileSizes[blobDigest] = counter.Count - s.filenames[blobDigest] = filename + s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest() + s.lockProtected.fileSizes[blobDigest] = counter.Count + s.lockProtected.filenames[blobDigest] = filename s.lock.Unlock() // This is safe because we have just computed diffID, and blobDigest was either computed // by us, or validated by the caller (usually copy.digestingReader). @@ -269,14 +294,14 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read // It is available only if SupportsPutBlobPartial(). 
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { fetcher := zstdFetcher{ chunkAccessor: chunkAccessor, ctx: ctx, blobInfo: srcInfo, } - differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher) + differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher) if err != nil { return private.UploadedBlob{}, err } @@ -286,13 +311,25 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces return private.UploadedBlob{}, err } + if out.TOCDigest == "" && out.UncompressedDigest == "" { + return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set") + } + blobDigest := srcInfo.Digest s.lock.Lock() - s.blobDiffIDs[blobDigest] = blobDigest - s.fileSizes[blobDigest] = 0 - s.filenames[blobDigest] = "" - s.diffOutputs[blobDigest] = out + if out.UncompressedDigest != "" { + // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is + // responsible for ensuring blobDigest has been validated. + s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest + } else { + // Don’t identify layers by TOC if UncompressedDigest is available. + // - Using UncompressedDigest allows image reuse with non-partially-pulled layers + // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. + // That TOC is quite unlikely to match with any other TOC value. + s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest + } + s.lockProtected.diffOutputs[options.LayerIndex] = out s.lock.Unlock() return private.UploadedBlob{ @@ -307,7 +344,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options) @@ -321,68 +358,79 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, }) } -// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata. +// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (blobDigest, size or -1), filling s.blobDiffIDs and other metadata. // The caller must arrange the blob to be eventually committed using s.commitLayer(). 
-func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { +func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { // lock the entire method as it executes fairly quickly s.lock.Lock() defer s.lock.Unlock() if options.SrcRef != nil { // Check if we have the layer in the underlying additional layer store. - aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String()) + aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobDigest, options.SrcRef.String()) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err) } else if err == nil { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[digest] = aLayer.UncompressedDigest() - s.blobAdditionalLayer[digest] = aLayer + s.lockProtected.blobDiffIDs[blobDigest] = aLayer.UncompressedDigest() + s.lockProtected.blobAdditionalLayer[blobDigest] = aLayer return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: aLayer.CompressedSize(), }, nil } } - if digest == "" { + if blobDigest == "" { return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`) } - if err := digest.Validate(); err != nil { + if err := blobDigest.Validate(); err != nil { return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) } + if options.TOCDigest != "" { + if err := options.TOCDigest.Validate(); err != nil { + return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) + } + } + + // Check if we have a wasn't-compressed layer in storage that's based on that blob. // Check if we've already cached it in a file. - if size, ok := s.fileSizes[digest]; ok { + if size, ok := s.lockProtected.fileSizes[blobDigest]; ok { + // s.lockProtected.blobDiffIDs is set either by putBlobToPendingFile or in createNewLayer when creating the + // filenames/fileSizes entry. return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: size, }, nil } - // Check if we have a wasn't-compressed layer in storage that's based on that blob. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest) + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobDigest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobDigest, err) } if len(layers) > 0 { - // Save this for completeness. - s.blobDiffIDs[digest] = layers[0].UncompressedDigest + s.lockProtected.blobDiffIDs[blobDigest] = blobDigest return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: layers[0].UncompressedSize, }, nil } // Check if we have a was-compressed layer in storage that's based on that blob. 
- layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest) + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobDigest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobDigest, err) } if len(layers) > 0 { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[digest] = layers[0].UncompressedDigest + // LayersByCompressedDigest only finds layers which were created from a full layer blob, and extracting that + // always sets UncompressedDigest. + diffID := layers[0].UncompressedDigest + if diffID == "" { + return false, private.ReusedBlob{}, fmt.Errorf("internal error: compressed layer %q (for compressed digest %q) does not have an uncompressed digest", layers[0].ID, blobDigest.String()) + } + s.lockProtected.blobDiffIDs[blobDigest] = diffID return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: layers[0].CompressedSize, }, nil } @@ -391,23 +439,23 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. if options.CanSubstitute || size != -1 { - if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest { + if uncompressedDigest := options.Cache.UncompressedDigest(blobDigest); uncompressedDigest != "" && uncompressedDigest != blobDigest { layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) } if len(layers) > 0 { if size != -1 { - s.blobDiffIDs[digest] = layers[0].UncompressedDigest + s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: size, }, nil } if !options.CanSubstitute { - return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest) + return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", blobDigest) } - s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + s.lockProtected.blobDiffIDs[uncompressedDigest] = uncompressedDigest return true, private.ReusedBlob{ Digest: uncompressedDigest, Size: layers[0].UncompressedSize, @@ -416,6 +464,32 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, } } + if options.TOCDigest != "" && options.LayerIndex != nil { + // Check if we have a chunked layer in storage with the same TOC digest. 
+ layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest) + + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err) + } + if len(layers) > 0 { + if size != -1 { + s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest + return true, private.ReusedBlob{ + Digest: blobDigest, + Size: size, + MatchedByTOCDigest: true, + }, nil + } else if options.CanSubstitute && layers[0].UncompressedDigest != "" { + s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest + return true, private.ReusedBlob{ + Digest: layers[0].UncompressedDigest, + Size: layers[0].UncompressedSize, + MatchedByTOCDigest: true, + }, nil + } + } + } + // Nope, we don't have it. return false, private.ReusedBlob{}, nil } @@ -425,6 +499,8 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, // that since we don't have a recommendation, a random ID should be used if one needs // to be allocated. func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + // Build the diffID list. We need the decompressed sums that we've been calculating to // fill in the DiffIDs. It's expected (but not enforced by us) that the number of // diffIDs corresponds to the number of non-EmptyLayer entries in the history. @@ -438,24 +514,59 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string { continue } blobSum := m.FSLayers[i].BlobSum - diffID, ok := s.blobDiffIDs[blobSum] + diffID, ok := s.lockProtected.blobDiffIDs[blobSum] if !ok { + // this can, in principle, legitimately happen when a layer is reused by TOC. logrus.Infof("error looking up diffID for layer %q", blobSum.String()) return "" } diffIDs = append([]digest.Digest{diffID}, diffIDs...) } case *manifest.Schema2, *manifest.OCI1: - // We know the ID calculation for these formats doesn't actually use the diffIDs, - // so we don't need to populate the diffID list. + // We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate + // the diffID list. default: return "" } - id, err := m.ImageID(diffIDs) + + // We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption. + // + // Traditionally that means the same ~config digest, as computed by m.ImageID; + // but if we pull a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest, + // nor against the config’s RootFS.DiffIDs. We don’t really want to do either, to allow partial layer pulls where we never see + // most of the data. + // + // So, if a layer is pulled by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC, + // must enter into the image ID computation. + // But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades, + // and to introduce the changed behavior only when partial pulls are used. + // + // Note that it’s not 100% guaranteed that an image pulled by TOC uses an OCI manifest; consider + // (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above. 
+ ordinaryImageID, err := m.ImageID(diffIDs) if err != nil { return "" } - return id + tocIDInput := "" + hasLayerPulledByTOC := false + for i := range m.LayerInfos() { + layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case. + tocDigest, ok := s.lockProtected.indexToTOCDigest[i] // "" if not a TOC + if ok { + hasLayerPulledByTOC = true + layerValue = tocDigest.String() + } + tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator. + } + + if !hasLayerPulledByTOC { + return ordinaryImageID + } + // ordinaryImageID is a digest of a config, which is a JSON value. + // To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON. + tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex() + logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID) + return tocImageID } // getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig @@ -468,7 +579,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err) } // Assume it's a file, since we're only calling this from a place that expects to read files. - if filename, ok := s.filenames[info.Digest]; ok { + if filename, ok := s.lockProtected.filenames[info.Digest]; ok { contents, err2 := os.ReadFile(filename) if err2 != nil { return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2) } @@ -502,23 +613,23 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) // caller is the "worker" routine committing layers. All other routines // can continue pulling and queuing in layers. s.lock.Lock() - s.indexToAddedLayerInfo[index] = info + s.lockProtected.indexToAddedLayerInfo[index] = info // We're still waiting for at least one previous/parent layer to be // committed, so there's nothing to do. - if index != s.currentIndex { + if index != s.lockProtected.currentIndex { s.lock.Unlock() return nil } for { - info, ok := s.indexToAddedLayerInfo[index] + info, ok := s.lockProtected.indexToAddedLayerInfo[index] if !ok { break } s.lock.Unlock() // Note: commitLayer locks on-demand. - if err := s.commitLayer(index, info, -1); err != nil { + if stopQueue, err := s.commitLayer(index, info, -1); stopQueue || err != nil { return err } s.lock.Lock() } // Set the index at the very end to make sure that only one routine // enters stage 2). - s.currentIndex = index + s.lockProtected.currentIndex = index s.lock.Unlock() return nil } +// singleLayerIDComponent returns a single layer’s input to computing a layer (chain) ID, +// and an indication whether the input already has the shape of a layer ID. +// It returns ("", false) if the layer is not found at all (which should never happen). +func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) { + s.lock.Lock() + defer s.lock.Unlock() + + if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found { + return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous. + } + + if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found { + return d.Hex(), true // This looks like chain IDs, and it uses the traditional value.
+ } + return "", false +} + // commitLayer commits the specified layer with the given index to the storage. // size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs. // +// If the layer cannot be committed yet, the function returns (true, nil). +// // Note that the previous layer is expected to already be committed. // // Caution: this function must be called without holding `s.lock`. Callers // must guarantee that, at any given time, at most one goroutine may execute // `commitLayer()`. -func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error { +func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) { // Already committed? Return early. if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted { - return nil + return false, nil } // Start with an empty string or the previous layer ID. Note that // `s.indexToStorageID` can only be accessed by *one* goroutine at any // given time. Hence, we don't need to lock accesses. - var lastLayer string - if prev := s.indexToStorageID[index-1]; prev != nil { - lastLayer = *prev + var parentLayer string + if index != 0 { + prev, ok := s.indexToStorageID[index-1] + if !ok { + return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1) + } + parentLayer = prev } // Carry over the previous ID for empty non-base layers. if info.emptyLayer { - s.indexToStorageID[index] = &lastLayer - return nil + s.indexToStorageID[index] = parentLayer + return false, nil } // Check if there's already a layer with the ID that we'd give to the result of applying // this layer blob to its parent, if it has one, or the blob's hex value otherwise. - s.lock.Lock() - diffID, haveDiffID := s.blobDiffIDs[info.digest] - s.lock.Unlock() - if !haveDiffID { - // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), - // or to even check if we had it. - // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller + // The layerID refers either to the DiffID or the digest of the TOC. + layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest) + if layerIDComponent == "" { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / … + // + // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller // that relies on using a blob digest that has never been seen by the store had better call // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only // so far we are going to accommodate that (if we should be doing that at all). - logrus.Debugf("looking for diffID for blob %+v", info.digest) + // + // We are also ignoring lookups by TOC, and other non-trivial situations. + // Those can only happen using the c/image/internal/private API, + // so those internal callers should be fixed to follow the API instead of expanding this fallback. + logrus.Debugf("looking for diffID for blob=%+v", info.digest) + // Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit. 
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{ Cache: none.NoCache, CanSubstitute: false, }) if err != nil { - return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err) + return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err) } if !has { - return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String()) + return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String()) } - diffID, haveDiffID = s.blobDiffIDs[info.digest] - if !haveDiffID { - return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String()) + + layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest) + if layerIDComponent == "" { + return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String()) } } - id := diffID.Hex() - if lastLayer != "" { - id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + + id := layerIDComponent + if !layerIDComponentStandalone || parentLayer != "" { + id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex() } if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { // There's already a layer that should have the right contents, just reuse it. - lastLayer = layer.ID - s.indexToStorageID[index] = &lastLayer - return nil + s.indexToStorageID[index] = layer.ID + return false, nil } + layer, err := s.createNewLayer(index, info.digest, parentLayer, id) + if err != nil { + return false, err + } + if layer == nil { + return true, nil + } + s.indexToStorageID[index] = layer.ID + return false, nil +} + +// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be ""). +// If the layer cannot be committed yet, the function returns (nil, nil). +func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) { s.lock.Lock() - diffOutput, ok := s.diffOutputs[info.digest] + diffOutput, ok := s.lockProtected.diffOutputs[index] s.lock.Unlock() if ok { - layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) - if err != nil { - return err + var untrustedUncompressedDigest digest.Digest + if diffOutput.UncompressedDigest == "" { + d, err := s.untrustedLayerDiffID(index) + if err != nil { + return nil, err + } + if d == "" { + logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID) + return nil, nil + } + untrustedUncompressedDigest = d } - // FIXME: what to do with the uncompressed digest? 
- diffOutput.UncompressedDigest = info.digest - - if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil { - _ = s.imageRef.transport.store.Delete(layer.ID) - return err + flags := make(map[string]interface{}) + if untrustedUncompressedDigest != "" { + flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest + logrus.Debugf("Setting uncompressed digest to %q for layer %q", untrustedUncompressedDigest, newLayerID) } - s.indexToStorageID[index] = &layer.ID - return nil + args := storage.ApplyStagedLayerOptions{ + ID: newLayerID, + ParentLayer: parentLayer, + + DiffOutput: diffOutput, + DiffOptions: &graphdriver.ApplyDiffWithDifferOpts{ + Flags: flags, + }, + } + layer, err := s.imageRef.transport.store.ApplyStagedLayer(args) + if err != nil && !errors.Is(err, storage.ErrDuplicateID) { + return nil, fmt.Errorf("failed to put layer using a partial pull: %w", err) + } + return layer, nil } s.lock.Lock() - al, ok := s.blobAdditionalLayer[info.digest] + al, ok := s.lockProtected.blobAdditionalLayer[layerDigest] s.lock.Unlock() if ok { - layer, err := al.PutAs(id, lastLayer, nil) + layer, err := al.PutAs(newLayerID, parentLayer, nil) if err != nil && !errors.Is(err, storage.ErrDuplicateID) { - return fmt.Errorf("failed to put layer from digest and labels: %w", err) + return nil, fmt.Errorf("failed to put layer from digest and labels: %w", err) } - lastLayer = layer.ID - s.indexToStorageID[index] = &lastLayer - return nil + return layer, nil } // Check if we previously cached a file with that blob's contents. If we didn't, // then we need to read the desired contents from a layer. + var trustedUncompressedDigest, trustedOriginalDigest digest.Digest // For storage.LayerOptions s.lock.Lock() - filename, ok := s.filenames[info.digest] + tocDigest := s.lockProtected.indexToTOCDigest[index] // "" if not set + optionalDiffID := s.lockProtected.blobDiffIDs[layerDigest] // "" if not set + filename, gotFilename := s.lockProtected.filenames[layerDigest] s.lock.Unlock() - if !ok { - // Try to find the layer with contents matching that blobsum. - layer := "" - layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID + if gotFilename && tocDigest == "" { + // If tocDigest != "" and we now happen to find a layerDigest match, the newLayerID has already been computed as TOC-based, + // and we don't know the relationship of the layerDigest and TOC digest. + // We could recompute newLayerID to be DiffID-based and use the file, but such a within-image layer + // reuse is expected to be pretty rare; instead, ignore the unexpected file match and proceed to the + // originally-planned TOC match. + + // Because tocDigest == "", optionalDiffID must have been set; and even if it weren’t, PutLayer will recompute the digest from the stream. + trustedUncompressedDigest = optionalDiffID + trustedOriginalDigest = layerDigest // The code setting .filenames[layerDigest] is responsible for the contents matching. + } else { + // Try to find the layer with contents matching the data we use.
+ var layer *storage.Layer // = nil + if tocDigest != "" { + layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(tocDigest) + if err2 == nil && len(layers) > 0 { + layer = &layers[0] + } else { + return nil, fmt.Errorf("locating layer for TOC digest %q: %w", tocDigest, err2) + } } else { - layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest) + // Because tocDigest == "", optionalDiffID must have been set + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(optionalDiffID) if err2 == nil && len(layers) > 0 { - layer = layers[0].ID + layer = &layers[0] + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(layerDigest) + if err2 == nil && len(layers) > 0 { + layer = &layers[0] + } + } + if layer == nil { + return nil, fmt.Errorf("locating layer for blob %q: %w", layerDigest, err2) } - } - if layer == "" { - return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2) } // Read the layer's contents. noCompression := archive.Uncompressed diffOptions := &storage.DiffOptions{ Compression: &noCompression, } - diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) + diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions) if err2 != nil { - return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2) + return nil, fmt.Errorf("reading layer %q for blob %q: %w", layer.ID, layerDigest, err2) } // Copy the layer diff to a file. Diff() takes a lock that it holds // until the ReadCloser that it returns is closed, and PutLayer() wants // the same lock, so the diff can't just be directly streamed from one // to the other. filename = s.computeNextBlobCacheFile() - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0o600) if err != nil { diff.Close() - return fmt.Errorf("creating temporary file %q: %w", filename, err) + return nil, fmt.Errorf("creating temporary file %q: %w", filename, err) } // Copy the data to the file. // TODO: This can take quite some time, and should ideally be cancellable using // ctx.Done(). - _, err = io.Copy(file, diff) + fileSize, err := io.Copy(file, diff) diff.Close() file.Close() if err != nil { - return fmt.Errorf("storing blob to file %q: %w", filename, err) + return nil, fmt.Errorf("storing blob to file %q: %w", filename, err) + } + + if optionalDiffID == "" && layer.UncompressedDigest != "" { + optionalDiffID = layer.UncompressedDigest + } + // The stream we have is uncompressed, and this matches the contents of the stream. + // If tocDigest != "", trustedUncompressedDigest might still be ""; in that case PutLayer will compute the value from the stream. + trustedUncompressedDigest = optionalDiffID + // FIXME? trustedOriginalDigest could be set to layerDigest IF tocDigest == "" (otherwise layerDigest is untrusted). + // But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created + // layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream). + // + // We can legitimately set storage.LayerOptions.OriginalDigest to "", + // but that would just result in PutLayer computing the digest of the input stream == optionalDiffID. + // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
+ trustedOriginalDigest = optionalDiffID + + // Allow using the already-collected layer contents without extracting the layer again. + // + // This only matches against the uncompressed digest. + // We don’t have the original compressed data here to trivially set filenames[layerDigest]. + // In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API. + // Within-image layer reuse is probably very rare; for now, we prefer to avoid that complexity. + if trustedUncompressedDigest != "" { + s.lock.Lock() + s.lockProtected.blobDiffIDs[trustedUncompressedDigest] = trustedUncompressedDigest + s.lockProtected.filenames[trustedUncompressedDigest] = filename + s.lockProtected.fileSizes[trustedUncompressedDigest] = fileSize + s.lock.Unlock() } - // Make sure that we can find this file later, should we need the layer's - // contents again. - s.lock.Lock() - s.filenames[info.digest] = filename - s.lock.Unlock() } // Read the cached blob and use it as a diff. file, err := os.Open(filename) if err != nil { - return fmt.Errorf("opening file %q: %w", filename, err) + return nil, fmt.Errorf("opening file %q: %w", filename, err) } defer file.Close() // Build the new layer using the diff, regardless of where it came from. // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). - layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{ - OriginalDigest: info.digest, - UncompressedDigest: diffID, + layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{ + OriginalDigest: trustedOriginalDigest, + UncompressedDigest: trustedUncompressedDigest, }, file) if err != nil && !errors.Is(err, storage.ErrDuplicateID) { - return fmt.Errorf("adding layer with blob %q: %w", info.digest, err) + return nil, fmt.Errorf("adding layer with blob %q: %w", layerDigest, err) } + return layer, nil +} - s.indexToStorageID[index] = &layer.ID - return nil +// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config. +// If the value is not yet available (but it can be available after s.manifest is set), it returns ("", nil). +// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication. +func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) { + // At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and + // nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil. + // Either way this function does not need the protection of s.lock. + if s.manifest == nil { + logrus.Debugf("Skipping commit for layer %d, manifest not yet available", layerIndex) + return "", nil + } + + if s.untrustedDiffIDValues == nil { + mt := manifest.GuessMIMEType(s.manifest) + if mt != imgspecv1.MediaTypeImageManifest { + // We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage, + // and then use types.Image.OCIConfig so that we can parse the image. + // + // In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has an annotation), + // while converting to a non-OCI format, using a manual (skopeo copy) or something similar, not (podman pull). + // So it is not implemented yet.
+ return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt) + } + man, err := manifest.FromBlob(s.manifest, mt) + if err != nil { + return "", fmt.Errorf("parsing manifest: %w", err) + } + + cb, err := s.getConfigBlob(man.ConfigInfo()) + if err != nil { + return "", err + } + + // retrieve the expected uncompressed digest from the config blob. + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(cb, configOCI); err != nil { + return "", err + } + s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs) + if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory… + s.untrustedDiffIDValues = []digest.Digest{} + } + } + if layerIndex >= len(s.untrustedDiffIDValues) { + return "", fmt.Errorf("image config has only %d DiffID values, but a layer with index %d exists", len(s.untrustedDiffIDValues), layerIndex) + } + return s.untrustedDiffIDValues[layerIndex], nil } // Commit marks the process of storing the image as successful and asks for the image to be persisted. @@ -716,6 +979,8 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + // This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + if len(s.manifest) == 0 { return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") } @@ -752,20 +1017,22 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Extract, commit, or find the layers. for i, blob := range layerBlobs { - if err := s.commitLayer(i, addedLayerInfo{ + if stopQueue, err := s.commitLayer(i, addedLayerInfo{ digest: blob.Digest, emptyLayer: blob.EmptyLayer, }, blob.Size); err != nil { return err + } else if stopQueue { + return fmt.Errorf("Internal error: storageImageDestination.Commit(): commitLayer() not ready to commit for layer %q", blob.Digest) } } var lastLayer string - if len(layerBlobs) > 0 { // Can happen when using caches - prev := s.indexToStorageID[len(layerBlobs)-1] - if prev == nil { + if len(layerBlobs) > 0 { // Zero-layer images rarely make sense, but it is technically possible, and may happen for non-image artifacts. + prev, ok := s.indexToStorageID[len(layerBlobs)-1] + if !ok { return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) } - lastLayer = *prev + lastLayer = prev } // If one of those blobs was a configuration blob, then we can try to dig out the date when the image @@ -779,14 +1046,14 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so // we just need to screen out the ones that are actually layers to get the list of non-layers. 
dataBlobs := set.New[digest.Digest]() - for blob := range s.filenames { + for blob := range s.lockProtected.filenames { dataBlobs.Add(blob) } for _, layerBlob := range layerBlobs { dataBlobs.Delete(layerBlob.Digest) } for _, blob := range dataBlobs.Values() { - v, err := os.ReadFile(s.filenames[blob]) + v, err := os.ReadFile(s.lockProtected.filenames[blob]) if err != nil { return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) } @@ -839,7 +1106,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } // Set up to save our metadata. - metadata, err := json.Marshal(s) + metadata, err := json.Marshal(s.metadata) if err != nil { return fmt.Errorf("encoding metadata for image: %w", err) } @@ -944,7 +1211,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s } if instanceDigest == nil { s.signatures = sigblob - s.SignatureSizes = sizes + s.metadata.SignatureSizes = sizes if len(s.manifest) > 0 { manifestDigest := s.manifestDigest instanceDigest = &manifestDigest @@ -952,7 +1219,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s } if instanceDigest != nil { s.signatureses[*instanceDigest] = sigblob - s.SignaturesSizes[*instanceDigest] = sizes + s.metadata.SignaturesSizes[*instanceDigest] = sizes } return nil } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index ac09f3db..eed846eb 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -18,11 +18,6 @@ var ( ErrNoSuchImage = storage.ErrNotAnImage ) -type storageImageCloser struct { - types.ImageCloser - size int64 -} - // manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions. // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably; // for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey @@ -36,6 +31,17 @@ func signatureBigDataKey(digest digest.Digest) string { return "signature-" + digest.Encoded() } +// storageImageMetadata is stored, as JSON, in storage.Image.Metadata +type storageImageMetadata struct { + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice +} + +type storageImageCloser struct { + types.ImageCloser + size int64 +} + // Size() returns the previously-computed size of the image, with no error. 
func (s *storageImageCloser) Size() (int64, error) { return s.size, nil diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index f1ce0861..ead3300f 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -34,16 +34,30 @@ type storageImageSource struct { impl.PropertyMethodsInitialize stubs.NoGetBlobAtInitialize - imageRef storageReference - image *storage.Image - systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files - layerPosition map[digest.Digest]int // Where we are in reading a blob's layers - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice - SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice + imageRef storageReference + image *storage.Image + systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files + metadata storageImageMetadata + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions + getBlobMutexProtected getBlobMutexProtected } +// getBlobMutexProtected contains storageImageSource data protected by getBlobMutex. +type getBlobMutexProtected struct { + // digestToLayerID is a lookup map from a possibly-untrusted uncompressed layer digest (as returned by LayerInfosForCopy) to the + // layer ID in the store. + digestToLayerID map[digest.Digest]string + + // layerPosition stores where we are in reading a blob's layers + layerPosition map[digest.Digest]int +} + +// expectedLayerDiffIDFlag is a per-layer flag containing an UNTRUSTED uncompressed digest of the layer. +// It is set when pulling a layer by TOC; later, this value is used with digestToLayerID +// to allow identifying the layer — and the consumer is expected to verify the blob returned by GetBlob against the digest. +const expectedLayerDiffIDFlag = "expected-layer-diffid" + // newImageSource sets up an image for reading. func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) { // First, locate the image. 
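An editorial aside on the storageImageMetadata split above: both sides now agree on the exact JSON persisted in storage.Image.Metadata, since the destination serializes s.metadata in Commit() and the source decodes into image.metadata in newImageSource. A minimal, self-contained sketch of that round-trip (the struct is re-declared here for illustration, with plain strings standing in for the digest.Digest map keys):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Re-declaration for illustration only; the real storageImageMetadata in
// storage_image.go keys SignaturesSizes by digest.Digest, not string.
type storageImageMetadata struct {
	SignatureSizes  []int            `json:"signature-sizes,omitempty"`
	SignaturesSizes map[string][]int `json:"signatures-sizes,omitempty"`
}

func main() {
	// What the destination would store in storage.Image.Metadata
	// (sizes and digest are hypothetical values).
	m := storageImageMetadata{
		SignatureSizes:  []int{512, 768},
		SignaturesSizes: map[string][]int{"sha256:1111": {512}},
	}
	blob, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob))

	// What newImageSource would read back into image.metadata.
	var decoded storageImageMetadata
	if err := json.Unmarshal(blob, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.SignatureSizes, decoded.SignaturesSizes)
}
```

Thanks to the omitempty tags, an image without signatures round-trips to an empty JSON object, which keeps the stored metadata minimal.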
@@ -59,16 +73,21 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*stora }), NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef), - imageRef: imageRef, - systemContext: sys, - image: img, - layerPosition: make(map[digest.Digest]int), - SignatureSizes: []int{}, - SignaturesSizes: make(map[digest.Digest][]int), + imageRef: imageRef, + systemContext: sys, + image: img, + metadata: storageImageMetadata{ + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + }, + getBlobMutexProtected: getBlobMutexProtected{ + digestToLayerID: make(map[digest.Digest]string), + layerPosition: make(map[digest.Digest]int), + }, } image.Compat = impl.AddCompat(image) if img.Metadata != "" { - if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + if err := json.Unmarshal([]byte(img.Metadata), &image.metadata); err != nil { return nil, fmt.Errorf("decoding metadata for source image: %w", err) } } @@ -91,6 +110,7 @@ func (s *storageImageSource) Close() error { func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { // We need a valid digest value. digest := info.Digest + err = digest.Validate() if err != nil { return nil, 0, err @@ -100,10 +120,25 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil } - // Check if the blob corresponds to a diff that was used to initialize any layers. Our - // callers should try to retrieve layers using their uncompressed digests, so no need to - // check if they're using one of the compressed digests, which we can't reproduce anyway. - layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest) + var layers []storage.Layer + + // This lookup path is strictly necessary for layers identified by TOC digest + // (where LayersByUncompressedDigest might not find our layer); + // for other layers it is an optimization to avoid the cost of the LayersByUncompressedDigest call. + s.getBlobMutex.Lock() + layerID, found := s.getBlobMutexProtected.digestToLayerID[digest] + s.getBlobMutex.Unlock() + + if found { + if layer, err := s.imageRef.transport.store.Layer(layerID); err == nil { + layers = []storage.Layer{*layer} + } + } else { + // Check if the blob corresponds to a diff that was used to initialize any layers. Our + // callers should try to retrieve layers using their uncompressed digests, so no need to + // check if they're using one of the compressed digests, which we can't reproduce anyway. + layers, _ = s.imageRef.transport.store.LayersByUncompressedDigest(digest) + } // If it's not a layer, then it must be a data item. if len(layers) == 0 { @@ -174,8 +209,8 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st // which claim to have the same contents, that we actually do have multiple layers, otherwise we could // just go ahead and use the first one every time. 
s.getBlobMutex.Lock() - i := s.layerPosition[digest] - s.layerPosition[digest] = i + 1 + i := s.getBlobMutexProtected.layerPosition[digest] + s.getBlobMutexProtected.layerPosition[digest] = i + 1 s.getBlobMutex.Unlock() if len(layers) > 0 { layer = layers[i%len(layers)] @@ -267,14 +302,35 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige if err != nil { return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err) } - if layer.UncompressedDigest == "" { - return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID) - } if layer.UncompressedSize < 0 { return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID) } + + blobDigest := layer.UncompressedDigest + if blobDigest == "" { + if layer.TOCDigest == "" { + return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID) + } + if layer.Flags == nil || layer.Flags[expectedLayerDiffIDFlag] == nil { + return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not set", layer.TOCDigest, layerID, expectedLayerDiffIDFlag) + } + expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string) + if !ok { + return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not a string", layer.TOCDigest, layerID, expectedLayerDiffIDFlag) + } + // If the layer is stored by its TOC, report the expected diffID as the layer Digest; + // the generic code is responsible for validating the digest. + // We can locate the layer without further c/storage help using s.getBlobMutexProtected.digestToLayerID. + blobDigest, err = digest.Parse(expectedDigest) + if err != nil { + return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err) + } + } + s.getBlobMutex.Lock() + s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID + s.getBlobMutex.Unlock() blobInfo := types.BlobInfo{ - Digest: layer.UncompressedDigest, + Digest: blobDigest, Size: layer.UncompressedSize, MediaType: uncompressedLayerType, } @@ -324,11 +380,11 @@ func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos [] func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { var offset int signatureBlobs := []byte{} - signatureSizes := s.SignatureSizes + signatureSizes := s.metadata.SignatureSizes key := "signatures" instance := "default instance" if instanceDigest != nil { - signatureSizes = s.SignaturesSizes[*instanceDigest] + signatureSizes = s.metadata.SignaturesSizes[*instanceDigest] key = signatureBigDataKey(*instanceDigest) instance = instanceDigest.Encoded() } @@ -374,7 +430,7 @@ func (s *storageImageSource) getSize() (int64, error) { sum += bigSize } // Add the signature sizes. - for _, sigSize := range s.SignatureSizes { + for _, sigSize := range s.metadata.SignatureSizes { sum += int64(sigSize) } // Walk the layer list. 
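One more aside, on the layerPosition map that moved under getBlobMutexProtected: the i%len(layers) rotation in getBlobAndLayerID exists because several stored layers can claim the same uncompressed digest, and handing out a different match on each call lets a consumer that reads "the same" diff repeatedly actually visit every copy. A simplified, self-contained model of that selection (the types and names here are invented for the sketch, not the real c/image ones):

```go
package main

import (
	"fmt"
	"sync"
)

// layerPicker mimics the rotation in getBlobAndLayerID: a per-digest counter
// advances on every request, and i%len(layers) picks the next matching layer.
// (Hypothetical helper; layers must be non-empty.)
type layerPicker struct {
	mu       sync.Mutex
	position map[string]int // digest -> number of times it was requested
}

func (p *layerPicker) pick(digest string, layers []string) string {
	p.mu.Lock()
	i := p.position[digest]
	p.position[digest] = i + 1
	p.mu.Unlock()
	return layers[i%len(layers)]
}

func main() {
	p := &layerPicker{position: map[string]int{}}
	// Two layers that claim the same uncompressed digest.
	layers := []string{"layer-a", "layer-b"}
	for n := 0; n < 4; n++ {
		fmt.Println(p.pick("sha256:feedface", layers)) // layer-a, layer-b, layer-a, layer-b
	}
}
```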
@@ -384,7 +440,7 @@ func (s *storageImageSource) getSize() (int64, error) { if err != nil { return -1, err } - if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || layer.UncompressedSize < 0 { return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID) } sum += layer.UncompressedSize diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go index deb500b4..b981953a 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_transport.go +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -213,7 +213,7 @@ func (s *storageTransport) GetStore() (storage.Store, error) { // Return the transport's previously-set store. If we don't have one // of those, initialize one now. if s.store == nil { - options, err := storage.DefaultStoreOptionsAutoDetectUID() + options, err := storage.DefaultStoreOptions() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 180a98c5..7d609734 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -135,8 +135,8 @@ type BlobInfo struct { // CompressionOperation is used in Image.UpdateLayerInfos to instruct // whether the original layer's "compressed or not" should be preserved, // possibly while changing the compression algorithm from one to another, - // or if it should be compressed or decompressed. The field defaults to - // preserve the original layer's compressedness. + // or if it should be changed to compressed or decompressed. + // The field defaults to preserve the original layer's compressedness. // TODO: To remove together with CryptoOperation in re-design to remove // field out of BlobInfo. CompressionOperation LayerCompression diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index b24ee881..a14c8e2f 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 29 + VersionMinor = 30 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. 
VersionDev = "" diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index c41dd5da..13bc20e7 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,13 +17,13 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-39ß" + FEDORA_NAME: "fedora-39" DEBIAN_NAME: "debian-13" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20231004t194547z-f39f38d13" + IMAGE_SUFFIX: "c20240102t155643z-f39f38d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -167,7 +167,7 @@ vendor_task: cross_task: container: - image: golang:1.19 + image: golang:1.20 build_script: make cross @@ -181,6 +181,6 @@ success_task: - vendor - cross container: - image: golang:1.19 + image: golang:1.20 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 77189d49..8461c090 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -41,7 +41,7 @@ containers-storage: ## build using gc on the host $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage codespell: - codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w binary local-binary: containers-storage diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index ba0a7191..3f483015 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.51.0 +1.53.0 diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index ab32d652..aa99fdea 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -73,6 +73,13 @@ type ApplyDiffOpts struct { ForceMask *os.FileMode } +// ApplyDiffWithDifferOpts contains optional arguments for ApplyDiffWithDiffer methods. +type ApplyDiffWithDifferOpts struct { + ApplyDiffOpts + + Flags map[string]interface{} +} + // InitFunc initializes the storage driver. type InitFunc func(homedir string, options Options) (Driver, error) @@ -189,6 +196,8 @@ type DriverWithDifferOutput struct { BigData map[string][]byte TarSplit []byte TOCDigest digest.Digest + // RootDirMode is the mode of the root directory of the layer, if specified. + RootDirMode *os.FileMode // Artifacts is a collection of additional artifacts // generated by the differ that the storage driver can use. 
Artifacts map[string]interface{} @@ -205,10 +214,26 @@ const ( DifferOutputFormatFlat ) +type DifferFsVerity int + +const ( + // DifferFsVerityDisabled means no fs-verity is used + DifferFsVerityDisabled = iota + + // DifferFsVerityEnabled means fs-verity is used when supported + DifferFsVerityEnabled + + // DifferFsVerityRequired means fs-verity is required + DifferFsVerityRequired +) + // DifferOptions overrides how the differ works type DifferOptions struct { // Format defines the destination directory layout format Format DifferOutputFormat + + // UseFsVerity defines whether fs-verity is used + UseFsVerity DifferFsVerity } // Differ defines the interface for using a custom differ. @@ -223,9 +248,9 @@ type DriverWithDiffer interface { Driver // ApplyDiffWithDiffer applies the changes using the callback function. // If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory. - ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, differ Differ) (output DriverWithDifferOutput, err error) - // ApplyDiffFromStagingDirectory applies the changes using the specified staging directory. - ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error + ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error) + // ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory. + ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error // CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors CleanupStagingDirectory(stagingDirectory string) error // DifferTarget gets the location where files are stored for the layer.
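To make the new surface concrete, here is a hedged sketch of the values a driver-side caller would now build: DifferOptions is what a Differ receives, while ApplyDiffWithDifferOpts travels through ApplyDiffFromStagingDirectory. Only the types and constants come from the hunk above; the flag key mirrors expectedLayerDiffIDFlag from c/image, and the digest value is made up:

```go
package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

func main() {
	// Options handed to a Differ: flat output layout, fs-verity when supported.
	differOpts := &graphdriver.DifferOptions{
		Format:      graphdriver.DifferOutputFormatFlat,
		UseFsVerity: graphdriver.DifferFsVerityEnabled,
	}

	// Options for applying a staged diff; the embedded ApplyDiffOpts is left at
	// its zero value, and Flags carries per-layer metadata such as the
	// expected-layer-diffid flag set by c/image (digest below is hypothetical).
	applyOpts := &graphdriver.ApplyDiffWithDifferOpts{
		Flags: map[string]interface{}{
			"expected-layer-diffid": "sha256:0123456789abcdef",
		},
	}

	fmt.Printf("%+v\n%+v\n", differOpts, applyOpts)
}
```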
diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go similarity index 63% rename from vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go rename to vendor/github.com/containers/storage/drivers/overlay/composefs.go index 26dd3686..baa9d7be 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go @@ -1,5 +1,5 @@ -//go:build linux && composefs && cgo -// +build linux,composefs,cgo +//go:build linux && cgo +// +build linux,cgo package overlay @@ -7,15 +7,13 @@ import ( "encoding/binary" "errors" "fmt" - "io/fs" "os" "os/exec" "path/filepath" "sync" - "syscall" - "unsafe" "github.com/containers/storage/pkg/chunked/dump" + "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/loopback" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -34,77 +32,6 @@ func getComposeFsHelper() (string, error) { return composeFsHelperPath, composeFsHelperErr } -func composeFsSupported() bool { - _, err := getComposeFsHelper() - return err == nil -} - -func enableVerity(description string, fd int) error { - enableArg := unix.FsverityEnableArg{ - Version: 1, - Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256, - Block_size: 4096, - } - - _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg))) - if e1 != 0 && !errors.Is(e1, unix.EEXIST) { - return fmt.Errorf("failed to enable verity for %q: %w", description, e1) - } - return nil -} - -type verityDigest struct { - Fsv unix.FsverityDigest - Buf [64]byte -} - -func measureVerity(description string, fd int) (string, error) { - var digest verityDigest - digest.Fsv.Size = 64 - _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) - if e1 != 0 { - return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) - } - return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil -} - -func enableVerityRecursive(root string) (map[string]string, error) { - digests := make(map[string]string) - walkFn := func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - if !d.Type().IsRegular() { - return nil - } - - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - - if err := enableVerity(path, int(f.Fd())); err != nil { - return err - } - - verity, err := measureVerity(path, int(f.Fd())) - if err != nil { - return err - } - - relPath, err := filepath.Rel(root, path) - if err != nil { - return err - } - - digests[relPath] = verity - return nil - } - err := filepath.WalkDir(root, walkFn) - return digests, err -} - func getComposefsBlob(dataDir string) string { return filepath.Join(dataDir, "composefs.blob") } @@ -156,7 +83,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com return err } - if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { + if err := fsverity.EnableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { logrus.Warningf("%s", err) } diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go deleted file mode 
100644 index 347e4d35..00000000 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !linux || !composefs || !cgo -// +build !linux !composefs !cgo - -package overlay - -import ( - "fmt" -) - -func composeFsSupported() bool { - return false -} - -func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { - return fmt.Errorf("composefs is not supported") -} - -func mountComposefsBlob(dataDir, mountPoint string) error { - return fmt.Errorf("composefs is not supported") -} - -func enableVerityRecursive(path string) (map[string]string, error) { - return nil, fmt.Errorf("composefs is not supported") -} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 04ecf871..f007aa94 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -82,7 +82,8 @@ const ( lowerFile = "lower" maxDepth = 500 - tocArtifact = "toc" + tocArtifact = "toc" + fsVerityDigestsArtifact = "fs-verity-digests" // idLength represents the number of random characters // which can be used to create the unique link identifier @@ -105,6 +106,7 @@ type overlayOptions struct { mountOptions string ignoreChownErrors bool forceMask *os.FileMode + useComposefs bool } // Driver contains information about the home directory and the list of active mounts that are created using this driver. @@ -122,6 +124,7 @@ type Driver struct { supportsDType bool supportsVolatile *bool usingMetacopy bool + usingComposefs bool supportsIDMappedMounts *bool } @@ -293,7 +296,7 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { // a bunch of network file systems... case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs, graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS, - graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX, + graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX, graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP, graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS, graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS, @@ -307,16 +310,6 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { // If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error. // If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned. func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { - // If custom --imagestore is selected never - // ditch the original graphRoot, instead add it as - // additionalImageStore so its images can still be - // read and used. 
- if options.ImageStore != "" { - graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore) - options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore) - // complete base name with driver name included - options.ImageStore = filepath.Join(options.ImageStore, "overlay") - } opts, err := parseOptions(options.DriverOptions) if err != nil { return nil, err @@ -387,6 +380,22 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } } + if opts.useComposefs { + if unshare.IsRootless() { + return nil, fmt.Errorf("composefs is not supported in user namespaces") + } + supportsDataOnly, err := supportsDataOnlyLayersCached(home, runhome) + if err != nil { + return nil, err + } + if !supportsDataOnly { + return nil, fmt.Errorf("composefs is not supported on this kernel: %w", graphdriver.ErrIncompatibleFS) + } + if _, err := getComposeFsHelper(); err != nil { + return nil, fmt.Errorf("composefs helper program not found: %w", err) + } + } + var usingMetacopy bool var supportsDType bool var supportsVolatile *bool @@ -448,6 +457,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) supportsDType: supportsDType, usingMetacopy: usingMetacopy, supportsVolatile: supportsVolatile, + usingComposefs: opts.useComposefs, options: *opts, } @@ -555,6 +565,12 @@ func parseOptions(options []string) (*overlayOptions, error) { withReference: withReference, }) } + case "use_composefs": + logrus.Debugf("overlay: use_composefs=%s", val) + o.useComposefs, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } case "mount_program": logrus.Debugf("overlay: mount_program=%s", val) if val != "" { @@ -782,7 +798,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI } func (d *Driver) useNaiveDiff() bool { - if d.useComposeFs() { + if d.usingComposefs { return true } @@ -837,22 +853,15 @@ func (d *Driver) Status() [][2]string { // Metadata returns meta data about the overlay driver such as // LowerDir, UpperDir, WorkDir and MergeDir used to store data. func (d *Driver) Metadata(id string) (map[string]string, error) { - dir, imagestore, _ := d.dir2(id) + dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } - workDirBase := dir - if imagestore != "" { - if _, err := os.Stat(dir); err != nil { - return nil, err - } - workDirBase = imagestore - } metadata := map[string]string{ - "WorkDir": path.Join(workDirBase, "work"), - "MergedDir": path.Join(workDirBase, "merged"), - "UpperDir": path.Join(workDirBase, "diff"), + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), } lowerDirs, err := d.getLowerDirs(id) @@ -870,7 +879,7 @@ func (d *Driver) Metadata(id string) (map[string]string, error) { // is being shutdown. For now, we just have to unmount the bind mounted // we had created. 
func (d *Driver) Cleanup() error { - _ = os.RemoveAll(d.getStagingDir()) + _ = os.RemoveAll(filepath.Join(d.home, stagingDir)) return mount.Unmount(d.home) } @@ -966,8 +975,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr return d.create(id, parent, opts, true) } -func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) { - dir, imageStore, _ := d.dir2(id) +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) { + dir, homedir, _ := d.dir2(id, readOnly) + + disableQuota := readOnly uidMaps := d.uidMaps gidMaps := d.gidMaps @@ -978,7 +989,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } // Make the link directory if it does not exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil { + if err := idtools.MkdirAllAs(path.Join(homedir, linkDir), 0o755, 0, 0); err != nil { return err } @@ -995,20 +1006,8 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil { return err } - workDirBase := dir - if imageStore != "" { - workDirBase = imageStore - if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil { - return err - } - } if parent != "" { - parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) - // If parentBase path is additional image store, select the image contained in parentBase. - // See https://github.com/containers/podman/issues/19748 - if parentImageStore != "" && !inAdditionalStore { - parentBase = parentImageStore - } + parentBase := d.dir(parent) st, err := system.Stat(filepath.Join(parentBase, "diff")) if err != nil { return err @@ -1029,11 +1028,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil { return err } - if imageStore != "" { - if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil { - return err - } - } defer func() { // Clean up on failure @@ -1041,11 +1035,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err2 := os.RemoveAll(dir); err2 != nil { logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2) } - if imageStore != "" { - if err2 := os.RemoveAll(workDirBase); err2 != nil { - logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2) - } - } } }() @@ -1068,11 +1057,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := d.quotaCtl.SetQuota(dir, quota); err != nil { return err } - if imageStore != "" { - if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil { - return err - } - } } perms := defaultPerms @@ -1081,12 +1065,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } if parent != "" { - parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) - // If parentBase path is additional image store, select the image contained in parentBase. 
- // See https://github.com/containers/podman/issues/19748 - if parentImageStore != "" && !inAdditionalStore { - parentBase = parentImageStore - } + parentBase := d.dir(parent) st, err := system.Stat(filepath.Join(parentBase, "diff")) if err != nil { return err @@ -1094,17 +1073,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable perms = os.FileMode(st.Mode()) } - if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil { return err } lid := generateID(idLength) linkBase := path.Join("..", id, "diff") - if imageStore != "" { - linkBase = path.Join(imageStore, "diff") - } - if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil { + if err := os.Symlink(linkBase, path.Join(homedir, linkDir, lid)); err != nil { return err } @@ -1113,10 +1089,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable return err } - if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil { return err } - if err := idtools.MkdirAs(path.Join(workDirBase, "merged"), 0o700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil { return err } @@ -1198,26 +1174,39 @@ func (d *Driver) getLower(parent string) (string, error) { } func (d *Driver) dir(id string) string { - p, _, _ := d.dir2(id) + p, _, _ := d.dir2(id, false) return p } -func (d *Driver) dir2(id string) (string, string, bool) { - newpath := path.Join(d.home, id) - imageStore := "" +func (d *Driver) getAllImageStores() []string { + additionalImageStores := d.AdditionalImageStores() if d.imageStore != "" { - imageStore = path.Join(d.imageStore, id) + additionalImageStores = append([]string{d.imageStore}, additionalImageStores...) 
+ } + return additionalImageStores +} + +func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) { + var homedir string + + if useImageStore && d.imageStore != "" { + homedir = path.Join(d.imageStore, d.name) + } else { + homedir = d.home } + + newpath := path.Join(homedir, id) + if _, err := os.Stat(newpath); err != nil { - for _, p := range d.AdditionalImageStores() { + for _, p := range d.getAllImageStores() { l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { - return l, imageStore, true + return l, homedir, true } } } - return newpath, imageStore, false + return newpath, homedir, false } func (d *Driver) getLowerDirs(id string) ([]string, error) { @@ -1427,14 +1416,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { } func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { - dir, imageStore, inAdditionalStore := d.dir2(id) + dir, _, inAdditionalStore := d.dir2(id, false) if _, err := os.Stat(dir); err != nil { return "", err } - workDirBase := dir - if imageStore != "" { - workDirBase = imageStore - } + readWrite := !inAdditionalStore if !d.SupportsShifting() || options.DisableShifting { @@ -1539,7 +1525,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO }() composeFsLayers := []string{} - composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers") + composeFsLayersDir := filepath.Join(dir, "composefs-layers") maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) { composefsBlob := d.getComposefsData(lowerID) _, err = os.Stat(composefsBlob) @@ -1573,7 +1559,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return dest, nil } - diffDir := path.Join(workDirBase, "diff") + diffDir := path.Join(dir, "diff") if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil { return "", err @@ -1591,7 +1577,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO lower := "" newpath := path.Join(d.home, l) if st, err := os.Stat(newpath); err != nil { - for _, p := range d.AdditionalImageStores() { + for _, p := range d.getAllImageStores() { lower = path.Join(p, d.name, l) if st2, err2 := os.Stat(lower); err2 == nil { if !permsKnown { @@ -1659,21 +1645,27 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO optsList = append(optsList, "metacopy=on", "redirect_dir=on") } - if len(absLowers) == 0 { - absLowers = append(absLowers, path.Join(dir, "empty")) - } - // user namespace requires this to move a directory from lower to upper. 
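The reworked dir2/getAllImageStores above settle on one lookup order: the primary home (or the configured image store when useImageStore is set), then the image store plus any additional image stores, each probed at `<store>/<driver name>/<id>`. A self-contained sketch of that search order follows, with placeholder paths; it mirrors the logic but is not the driver code itself.

package main

import (
	"fmt"
	"os"
	"path"
)

// resolveLayerDir mimics the lookup order of the patched dir2: prefer the
// primary directory, then fall back to read-only stores that hold the layer.
func resolveLayerDir(primary, driverName, id string, stores []string) string {
	newpath := path.Join(primary, id)
	if _, err := os.Stat(newpath); err != nil {
		for _, p := range stores {
			l := path.Join(p, driverName, id)
			if _, err := os.Stat(l); err == nil {
				return l // found in a read-only (additional) store
			}
		}
	}
	return newpath // default to the primary location even if absent
}

func main() {
	// Paths are illustrative placeholders.
	dir := resolveLayerDir("/var/lib/containers/storage/overlay", "overlay",
		"abcd1234", []string{"/usr/lib/containers/storage"})
	fmt.Println(dir)
}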
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps) if err != nil { return "", err } + if len(absLowers) == 0 { + absLowers = append(absLowers, path.Join(dir, "empty")) + } + if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil { - return "", err + if !inAdditionalStore { + return "", err + } + // if it is in an additional store, do not fail if the directory already exists + if _, err2 := os.Stat(diffDir); err2 != nil { + return "", err + } } - mergedDir := path.Join(workDirBase, "merged") + mergedDir := path.Join(dir, "merged") // Create the driver merged dir if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { return "", err } @@ -1691,7 +1683,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } }() - workdir := path.Join(workDirBase, "work") + workdir := path.Join(dir, "work") if d.options.mountProgram == "" && unshare.IsRootless() { optsList = append(optsList, "userxattr") @@ -1841,7 +1833,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // Put unmounts the mount path created for the given id. func (d *Driver) Put(id string) error { - dir := d.dir(id) + dir, _, inAdditionalStore := d.dir2(id, false) if _, err := os.Stat(dir); err != nil { return err } @@ -1902,11 +1894,27 @@ func (d *Driver) Put(id string) error { } } - if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { - logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err) - return fmt.Errorf("removing mount point %q: %w", mountpoint, err) - } + if !inAdditionalStore { + uid, gid := int(0), int(0) + fi, err := os.Stat(mountpoint) + if err != nil { + return err + } + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + uid, gid = int(stat.Uid), int(stat.Gid) + } + tmpMountpoint := path.Join(dir, "merged.1") + if err := idtools.MkdirAs(tmpMountpoint, 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) { + return err + } + // rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains + // its atomic semantics. In this way the "merged" directory is never removed. + if err := unix.Rename(tmpMountpoint, mountpoint); err != nil { + logrus.Debugf("Failed to replace mountpoint %s overlay: %s - %v", id, mountpoint, err) + return fmt.Errorf("replacing mount point %q: %w", mountpoint, err) + } + } return nil } @@ -1994,14 +2002,18 @@ func (g *overlayFileGetter) Close() error { return nil } -func (d *Driver) getStagingDir() string { - return filepath.Join(d.home, stagingDir) +func (d *Driver) getStagingDir(id string) string { + _, homedir, _ := d.dir2(id, d.imageStore != "") + return filepath.Join(homedir, stagingDir) } // DiffGetter returns a FileGetCloser that can read files from the directory that // contains files for the layer differences, either for this layer, or one of our // lowers if we're just a template directory. Used for direct access for tar-split.
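On the Put change above: instead of rmdir-ing the mountpoint, the driver now creates merged.1 and renames it over merged. rename(2) atomically replaces one empty directory with another, so there is never a window in which the "merged" directory does not exist. A reduced demonstration of the same swap on throwaway temp directories:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	dir, err := os.MkdirTemp("", "put-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	mountpoint := filepath.Join(dir, "merged")
	if err := os.Mkdir(mountpoint, 0o700); err != nil {
		panic(err)
	}

	// Stage a replacement directory next to the mountpoint, then atomically
	// swap it in. Because "merged" is an empty directory after umount,
	// rename(2) replaces it in a single step instead of rmdir+mkdir.
	tmp := filepath.Join(dir, "merged.1")
	if err := os.Mkdir(tmp, 0o700); err != nil && !os.IsExist(err) {
		panic(err)
	}
	if err := unix.Rename(tmp, mountpoint); err != nil {
		panic(err)
	}
	fmt.Println("replaced", mountpoint, "atomically")
}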
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + if d.usingComposefs { + return nil, nil + } p, err := d.getDiffPath(id) if err != nil { return nil, err @@ -2018,9 +2030,9 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error { return os.RemoveAll(stagingDirectory) } -func (d *Driver) supportsDataOnlyLayers() (bool, error) { +func supportsDataOnlyLayersCached(home, runhome string) (bool, error) { feature := "dataonly-layers" - overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(d.runhome, feature) + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) if err == nil { if overlayCacheResult { logrus.Debugf("Cached value indicated that data-only layers for overlay are supported") @@ -2029,27 +2041,15 @@ func (d *Driver) supportsDataOnlyLayers() (bool, error) { logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported") return false, errors.New(overlayCacheText) } - supportsDataOnly, err := supportsDataOnlyLayers(d.home) - if err2 := cachedFeatureRecord(d.runhome, feature, supportsDataOnly, ""); err2 != nil { + supportsDataOnly, err := supportsDataOnlyLayers(home) + if err2 := cachedFeatureRecord(runhome, feature, supportsDataOnly, ""); err2 != nil { return false, fmt.Errorf("recording overlay data-only layers support status: %w", err2) } return supportsDataOnly, err } -func (d *Driver) useComposeFs() bool { - if !composeFsSupported() || unshare.IsRootless() { - return false - } - supportsDataOnlyLayers, err := d.supportsDataOnlyLayers() - if err != nil { - logrus.Debugf("Check for data-only layers failed with: %v", err) - return false - } - return supportsDataOnlyLayers -} - // ApplyDiff applies the changes in the new layer using the specified function -func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) { +func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) { var idMappings *idtools.IDMappings if options != nil { idMappings = options.Mappings @@ -2061,15 +2061,22 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App var applyDir string if id == "" { - err := os.MkdirAll(d.getStagingDir(), 0o700) + stagingDir := d.getStagingDir(id) + err := os.MkdirAll(stagingDir, 0o700) if err != nil && !os.IsExist(err) { return graphdriver.DriverWithDifferOutput{}, err } - applyDir, err = os.MkdirTemp(d.getStagingDir(), "") + applyDir, err = os.MkdirTemp(stagingDir, "") if err != nil { return graphdriver.DriverWithDifferOutput{}, err } - + perms := defaultPerms + if d.options.forceMask != nil { + perms = *d.options.forceMask + } + if err := os.Chmod(applyDir, perms); err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } } else { var err error applyDir, err = d.getDiffPath(id) @@ -2083,8 +2090,9 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App differOptions := graphdriver.DifferOptions{ Format: graphdriver.DifferOutputFormatDir, } - if d.useComposeFs() { + if d.usingComposefs { differOptions.Format = graphdriver.DifferOutputFormatFlat + differOptions.UseFsVerity = graphdriver.DifferFsVerityEnabled } out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{ UIDMaps: idMappings.UIDs(), @@ -2100,33 +2108,42 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent 
string, options *graphdriver.App } // ApplyDiffFromStagingDirectory applies the changes using the specified staging directory. -func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffOpts) error { - if filepath.Dir(stagingDirectory) != d.getStagingDir() { +func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error { + stagingDirectory := diffOutput.Target + if filepath.Dir(stagingDirectory) != d.getStagingDir(id) { return fmt.Errorf("%q is not a staging directory", stagingDirectory) } + diffPath, err := d.getDiffPath(id) + if err != nil { + return err + } - if d.useComposeFs() { - // FIXME: move this logic into the differ so we don't have to open - // the file twice. - verityDigests, err := enableVerityRecursive(stagingDirectory) - if err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { - logrus.Warningf("%s", err) + // If the current layer doesn't set the mode for the parent, override it with the parent layer's mode. + if d.options.forceMask == nil && diffOutput.RootDirMode == nil && parent != "" { + parentDiffPath, err := d.getDiffPath(parent) + if err != nil { + return err + } + parentSt, err := os.Stat(parentDiffPath) + if err != nil { + return err } + if err := os.Chmod(stagingDirectory, parentSt.Mode()); err != nil { + return err + } + } + + if d.usingComposefs { toc := diffOutput.Artifacts[tocArtifact] + verityDigests := diffOutput.Artifacts[fsVerityDigestsArtifact].(map[string]string) if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil { return err } } - diffPath, err := d.getDiffPath(id) - if err != nil { - return err - } if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) { return err } - diffOutput.UncompressedDigest = diffOutput.TOCDigest - return os.Rename(stagingDirectory, diffPath) } @@ -2179,12 +2196,8 @@ func (d *Driver) getComposefsData(id string) string { } func (d *Driver) getDiffPath(id string) (string, error) { - dir, imagestore, _ := d.dir2(id) - base := dir - if imagestore != "" { - base = imagestore - } - return redirectDiffIfAdditionalLayer(path.Join(base, "diff")) + dir := d.dir(id) + return redirectDiffIfAdditionalLayer(path.Join(dir, "diff")) } func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { @@ -2275,12 +2288,8 @@ func (d *Driver) AdditionalImageStores() []string { // by toContainer to those specified by toHost. 
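Note the signature change running through this hunk: the staging directory is no longer passed around by callers. ApplyDiffWithDiffer records it in the returned DriverWithDifferOutput, and ApplyDiffFromStagingDirectory reads it back from diffOutput.Target. Roughly, the two-step flow now looks like the sketch below; the applyStaged name is invented, nil stands in for real ApplyDiffWithDifferOpts, and error handling is trimmed.

package main

import (
	graphdriver "github.com/containers/storage/drivers"
)

// applyStaged sketches the updated flow: stage the diff with an empty id
// (which makes the driver create a staging directory), then commit it; the
// staging path travels inside out.Target rather than as an argument.
func applyStaged(d graphdriver.DriverWithDiffer, id, parent string, differ graphdriver.Differ) error {
	out, err := d.ApplyDiffWithDiffer("", parent, nil, differ)
	if err != nil {
		return err
	}
	return d.ApplyDiffFromStagingDirectory(id, parent, &out, nil)
}

func main() {}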
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { var err error - dir, imagestore, _ := d.dir2(id) - base := dir - if imagestore != "" { - base = imagestore - } - diffDir := filepath.Join(base, "diff") + dir := d.dir(id) + diffDir := filepath.Join(dir, "diff") rootUID, rootGID := 0, 0 if toHost != nil { diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go index 2a7a307a..d4f540c9 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go @@ -4,6 +4,7 @@ package overlay import ( + "fmt" "path" "github.com/containers/storage/pkg/directory" @@ -15,3 +16,15 @@ import ( func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { return directory.Usage(path.Join(d.dir(id), "diff")) } + +func getComposeFsHelper() (string, error) { + return "", fmt.Errorf("composefs not supported on this build") +} + +func mountComposefsBlob(dataDir, mountPoint string) error { + return fmt.Errorf("composefs not supported on this build") +} + +func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { + return fmt.Errorf("composefs not supported on this build") +} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go index 2be79698..081fb2b1 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -1,453 +1,5 @@ -//go:build linux && !exclude_disk_quota && cgo -// +build linux,!exclude_disk_quota,cgo - -// -// projectquota.go - implements XFS project quota controls -// for setting quota limits on a newly created directory. -// It currently supports the legacy XFS specific ioctls. -// -// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR -// for both xfs/ext4 for kernel version >= v4.5 -// - package quota -/* -#include <stdlib.h> -#include <dirent.h> -#include <linux/fs.h> -#include <linux/quota.h> -#include <linux/dqblk_xfs.h> - -#ifndef FS_XFLAG_PROJINHERIT -struct fsxattr { - __u32 fsx_xflags; - __u32 fsx_extsize; - __u32 fsx_nextents; - __u32 fsx_projid; - unsigned char fsx_pad[12]; -}; -#define FS_XFLAG_PROJINHERIT 0x00000200 -#endif -#ifndef FS_IOC_FSGETXATTR -#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) -#endif -#ifndef FS_IOC_FSSETXATTR -#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) -#endif - -#ifndef PRJQUOTA -#define PRJQUOTA 2 -#endif -#ifndef FS_PROJ_QUOTA -#define FS_PROJ_QUOTA 2 -#endif -#ifndef Q_XSETPQLIM -#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) -#endif -#ifndef Q_XGETPQUOTA -#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) -#endif -*/ -import "C" - -import ( - "errors" - "fmt" - "math" - "os" - "path" - "path/filepath" - "sync" - "syscall" - "unsafe" - - "github.com/containers/storage/pkg/directory" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const projectIDsAllocatedPerQuotaHome = 10000 - // BackingFsBlockDeviceLink is the name of a file that we place in // the home directory of a driver that uses this package. const BackingFsBlockDeviceLink = "backingFsBlockDev" - -// Quota limit params - currently we only control blocks hard limit and inodes -type Quota struct { - Size uint64 - Inodes uint64 -} - -// Control - Context to be used by storage driver (e.g.
overlay) -// who wants to apply project quotas to container dirs -type Control struct { - backingFsBlockDev string - nextProjectID uint32 - quotas *sync.Map - basePath string -} - -// Attempt to generate a unigue projectid. Multiple directories -// per file system can have quota and they need a group of unique -// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000) -// unique projectids, based on the inode of the basepath. -func generateUniqueProjectID(path string) (uint32, error) { - fileinfo, err := os.Stat(path) - if err != nil { - return 0, err - } - stat, ok := fileinfo.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("not a syscall.Stat_t %s", path) - } - projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome) - return uint32(projectID), nil -} - -// NewControl - initialize project quota support. -// Test to make sure that quota can be set on a test dir and find -// the first project id to be used for the next container create. -// -// Returns nil (and error) if project quota is not supported. -// -// First get the project id of the basePath directory. -// This test will fail if the backing fs is not xfs. -// -// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: -// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects -// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects -// echo storage:100000 >> /etc/projid -// echo volumes:200000 >> /etc/projid -// xfs_quota -x -c 'project -s storage volumes' / -// -// In the example above, the storage directory project id will be used as a -// "start offset" and all containers will be assigned larger project ids -// (e.g. >= 100000). Then the volumes directory project id will be used as a -// "start offset" and all volumes will be assigned larger project ids -// (e.g. >= 200000). -// This is a way to prevent xfs_quota management from conflicting with -// containers/storage. - -// Then try to create a test directory with the next project id and set a quota -// on it. If that works, continue to scan existing containers to map allocated -// project ids. 
-func NewControl(basePath string) (*Control, error) { - // - // Get project id of parent dir as minimal id to be used by driver - // - minProjectID, err := getProjectID(basePath) - if err != nil { - return nil, err - } - if minProjectID == 0 { - // Indicates the storage was never initialized - // Generate a unique range of Projectids for this basepath - minProjectID, err = generateUniqueProjectID(basePath) - if err != nil { - return nil, err - } - - } - // - // create backing filesystem device node - // - backingFsBlockDev, err := makeBackingFsDev(basePath) - if err != nil { - return nil, err - } - - // - // Test if filesystem supports project quotas by trying to set - // a quota on the first available project id - // - quota := Quota{ - Size: 0, - Inodes: 0, - } - - q := Control{ - backingFsBlockDev: backingFsBlockDev, - nextProjectID: minProjectID + 1, - quotas: &sync.Map{}, - basePath: basePath, - } - - if err := q.setProjectQuota(minProjectID, quota); err != nil { - return nil, err - } - - // - // get first project id to be used for next container - // - err = q.findNextProjectID() - if err != nil { - return nil, err - } - - logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) - return &q, nil -} - -// SetQuota - assign a unique project id to directory and set the quota limits -// for that project id -func (q *Control) SetQuota(targetPath string, quota Quota) error { - var projectID uint32 - value, ok := q.quotas.Load(targetPath) - if ok { - projectID, ok = value.(uint32) - } - if !ok { - projectID = q.nextProjectID - - // - // assign project id to new container directory - // - err := setProjectID(targetPath, projectID) - if err != nil { - return err - } - - q.quotas.Store(targetPath, projectID) - q.nextProjectID++ - } - - // - // set the quota limit for the container's project id - // - logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) - return q.setProjectQuota(projectID, quota) -} - -// ClearQuota removes the map entry in the quotas map for targetPath. -// It does so to prevent the map leaking entries as directories are deleted. -func (q *Control) ClearQuota(targetPath string) { - q.quotas.Delete(targetPath) -} - -// setProjectQuota - set the quota for project id on xfs block device -func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { - var d C.fs_disk_quota_t - d.d_version = C.FS_DQUOT_VERSION - d.d_id = C.__u32(projectID) - d.d_flags = C.FS_PROJ_QUOTA - - if quota.Size > 0 { - d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT - d.d_blk_hardlimit = C.__u64(quota.Size / 512) - d.d_blk_softlimit = d.d_blk_hardlimit - } - if quota.Inodes > 0 { - d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT - d.d_ino_hardlimit = C.__u64(quota.Inodes) - d.d_ino_softlimit = d.d_ino_hardlimit - } - - cs := C.CString(q.backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - runQuotactl := func() syscall.Errno { - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, - uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), - uintptr(unsafe.Pointer(&d)), 0, 0) - return errno - } - - errno := runQuotactl() - - // If the backingFsBlockDev does not exist any more then try to recreate it. 
- if errors.Is(errno, unix.ENOENT) { - if _, err := makeBackingFsDev(q.basePath); err != nil { - return fmt.Errorf( - "failed to recreate missing backingFsBlockDev %s for projid %d: %w", - q.backingFsBlockDev, projectID, err, - ) - } - - if errno := runQuotactl(); errno != 0 { - return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w", - projectID, q.backingFsBlockDev, errno) - } - - } else if errno != 0 { - return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", - projectID, q.backingFsBlockDev, errno) - } - - return nil -} - -// GetQuota - get the quota limits of a directory that was configured with SetQuota -func (q *Control) GetQuota(targetPath string, quota *Quota) error { - d, err := q.fsDiskQuotaFromPath(targetPath) - if err != nil { - return err - } - quota.Size = uint64(d.d_blk_hardlimit) * 512 - quota.Inodes = uint64(d.d_ino_hardlimit) - return nil -} - -// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota -func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error { - d, err := q.fsDiskQuotaFromPath(targetPath) - if err != nil { - return err - } - usage.Size = int64(d.d_bcount) * 512 - usage.InodeCount = int64(d.d_icount) - - return nil -} - -func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) { - var d C.fs_disk_quota_t - var projectID uint32 - value, ok := q.quotas.Load(targetPath) - if ok { - projectID, ok = value.(uint32) - } - if !ok { - return d, fmt.Errorf("quota not found for path : %s", targetPath) - } - - // - // get the quota limit for the container's project id - // - cs := C.CString(q.backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, - uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { - return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w", - projectID, q.backingFsBlockDev, errno) - } - - return d, nil -} - -// getProjectID - get the project id of path on xfs -func getProjectID(targetPath string) (uint32, error) { - dir, err := openDir(targetPath) - if err != nil { - return 0, err - } - defer closeDir(dir) - - var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) - } - - return uint32(fsx.fsx_projid), nil -} - -// setProjectID - set the project id of path on xfs -func setProjectID(targetPath string, projectID uint32) error { - dir, err := openDir(targetPath) - if err != nil { - return err - } - defer closeDir(dir) - - var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) - } - fsx.fsx_projid = C.__u32(projectID) - fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT - _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno) - } - - return nil -} - -// findNextProjectID - find the next project id to be used for containers -// by scanning driver home directory to find used project ids -func (q *Control) findNextProjectID() error { - files, err := os.ReadDir(q.basePath) - if 
err != nil { - return fmt.Errorf("read directory failed : %s", q.basePath) - } - for _, file := range files { - if !file.IsDir() { - continue - } - path := filepath.Join(q.basePath, file.Name()) - projid, err := getProjectID(path) - if err != nil { - return err - } - if projid > 0 { - q.quotas.Store(path, projid) - } - if q.nextProjectID <= projid { - q.nextProjectID = projid + 1 - } - } - - return nil -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir, errno := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno) - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -// Get the backing block device of the driver home directory -// and create a block device node under the home directory -// to be used by quotactl commands -func makeBackingFsDev(home string) (string, error) { - var stat unix.Stat_t - if err := unix.Stat(home, &stat); err != nil { - return "", err - } - - backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink) - backingFsBlockDevTmp := backingFsBlockDev + ".tmp" - // Re-create just in case someone copied the home directory over to a new device - if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { - if !errors.Is(err, unix.EEXIST) { - return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) - } - // On EEXIST, try again after unlinking any potential leftover. - _ = unix.Unlink(backingFsBlockDevTmp) - if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { - return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) - } - } - if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { - return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) - } - - return backingFsBlockDev, nil -} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go new file mode 100644 index 00000000..b0623bda --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go @@ -0,0 +1,449 @@ +//go:build linux && !exclude_disk_quota && cgo +// +build linux,!exclude_disk_quota,cgo + +// +// projectquota.go - implements XFS project quota controls +// for setting quota limits on a newly created directory. +// It currently supports the legacy XFS specific ioctls. 
+// +// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR +// for both xfs/ext4 for kernel version >= v4.5 +// + +package quota + +/* +#include <stdlib.h> +#include <dirent.h> +#include <linux/fs.h> +#include <linux/quota.h> +#include <linux/dqblk_xfs.h> + +#ifndef FS_XFLAG_PROJINHERIT +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + unsigned char fsx_pad[12]; +}; +#define FS_XFLAG_PROJINHERIT 0x00000200 +#endif +#ifndef FS_IOC_FSGETXATTR +#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) +#endif +#ifndef FS_IOC_FSSETXATTR +#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) +#endif + +#ifndef PRJQUOTA +#define PRJQUOTA 2 +#endif +#ifndef FS_PROJ_QUOTA +#define FS_PROJ_QUOTA 2 +#endif +#ifndef Q_XSETPQLIM +#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) +#endif +#ifndef Q_XGETPQUOTA +#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) +#endif +*/ +import "C" + +import ( + "errors" + "fmt" + "math" + "os" + "path" + "path/filepath" + "sync" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/directory" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const projectIDsAllocatedPerQuotaHome = 10000 + +// Quota limit params - currently we only control blocks hard limit and inodes +type Quota struct { + Size uint64 + Inodes uint64 +} + +// Control - Context to be used by storage driver (e.g. overlay) +// who wants to apply project quotas to container dirs +type Control struct { + backingFsBlockDev string + nextProjectID uint32 + quotas *sync.Map + basePath string +} + +// Attempt to generate a unique projectid. Multiple directories +// per file system can have quota and they need a group of unique +// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000) +// unique projectids, based on the inode of the basepath. +func generateUniqueProjectID(path string) (uint32, error) { + fileinfo, err := os.Stat(path) + if err != nil { + return 0, err + } + stat, ok := fileinfo.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("not a syscall.Stat_t %s", path) + } + projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome) + return uint32(projectID), nil +} + +// NewControl - initialize project quota support. +// Test to make sure that quota can be set on a test dir and find +// the first project id to be used for the next container create. +// +// Returns nil (and error) if project quota is not supported. +// +// First get the project id of the basePath directory. +// This test will fail if the backing fs is not xfs. +// +// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: +// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects +// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects +// echo storage:100000 >> /etc/projid +// echo volumes:200000 >> /etc/projid +// xfs_quota -x -c 'project -s storage volumes' / +// +// In the example above, the storage directory project id will be used as a +// "start offset" and all containers will be assigned larger project ids +// (e.g. >= 100000). Then the volumes directory project id will be used as a +// "start offset" and all volumes will be assigned larger project ids +// (e.g. >= 200000). +// This is a way to prevent xfs_quota management from conflicting with +// containers/storage. + +// Then try to create a test directory with the next project id and set a quota +// on it. If that works, continue to scan existing containers to map allocated +// project ids.
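For intuition, generateUniqueProjectID above derives the starting project id as P + (ino * P) mod (2^32 - P), with P = projectIDsAllocatedPerQuotaHome = 10000, which keeps ids above the reserved low range and within uint32. A standalone run of the arithmetic; the inode value is made up (the real code reads it from syscall.Stat_t.Ino):

package main

import (
	"fmt"
	"math"
)

const projectIDsAllocatedPerQuotaHome = 10000

func main() {
	// Pretend the basepath's inode is 12345.
	var ino uint64 = 12345
	projectID := projectIDsAllocatedPerQuotaHome +
		(ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
	fmt.Println(uint32(projectID)) // 123460000: past the reserved range, well inside uint32
}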
+func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + if minProjectID == 0 { + // Indicates the storage was never initialized + // Generate a unique range of Projectids for this basepath + minProjectID, err = generateUniqueProjectID(basePath) + if err != nil { + return nil, err + } + + } + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + Inodes: 0, + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: &sync.Map{}, + basePath: basePath, + } + + if err := q.setProjectQuota(minProjectID, quota); err != nil { + return nil, err + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID() + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + var projectID uint32 + value, ok := q.quotas.Load(targetPath) + if ok { + projectID, ok = value.(uint32) + } + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas.Store(targetPath, projectID) + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) + return q.setProjectQuota(projectID, quota) +} + +// ClearQuota removes the map entry in the quotas map for targetPath. +// It does so to prevent the map leaking entries as directories are deleted. +func (q *Control) ClearQuota(targetPath string) { + q.quotas.Delete(targetPath) +} + +// setProjectQuota - set the quota for project id on xfs block device +func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.FS_PROJ_QUOTA + + if quota.Size > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + } + if quota.Inodes > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT + d.d_ino_hardlimit = C.__u64(quota.Inodes) + d.d_ino_softlimit = d.d_ino_hardlimit + } + + cs := C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + runQuotactl := func() syscall.Errno { + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + return errno + } + + errno := runQuotactl() + + // If the backingFsBlockDev does not exist any more then try to recreate it. 
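The comment above introduces a recreate-and-retry step around quotactl: if the call fails with ENOENT, the backing device node is recreated and the operation is attempted exactly once more. The generic shape of that pattern, with stand-in callbacks (neither is the real makeBackingFsDev):

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

// retryAfterRecreate runs an operation once and, on ENOENT, recreates the
// missing resource and tries exactly one more time.
func retryAfterRecreate(op func() error, recreate func() error) error {
	err := op()
	if errors.Is(err, unix.ENOENT) {
		if rerr := recreate(); rerr != nil {
			return rerr
		}
		return op()
	}
	return err
}

func main() {
	calls := 0
	err := retryAfterRecreate(
		func() error {
			calls++
			if calls == 1 {
				return unix.ENOENT // first attempt: the device node is gone
			}
			return nil
		},
		func() error { return nil }, // stands in for recreating the node
	)
	fmt.Println("ok:", err == nil, "attempts:", calls)
}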
+ if errors.Is(errno, unix.ENOENT) { + if _, err := makeBackingFsDev(q.basePath); err != nil { + return fmt.Errorf( + "failed to recreate missing backingFsBlockDev %s for projid %d: %w", + q.backingFsBlockDev, projectID, err, + ) + } + + if errno := runQuotactl(); errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w", + projectID, q.backingFsBlockDev, errno) + } + + } else if errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + d, err := q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + quota.Inodes = uint64(d.d_ino_hardlimit) + return nil +} + +// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota +func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error { + d, err := q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + usage.Size = int64(d.d_bcount) * 512 + usage.InodeCount = int64(d.d_icount) + + return nil +} + +func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) { + var d C.fs_disk_quota_t + var projectID uint32 + value, ok := q.quotas.Load(targetPath) + if ok { + projectID, ok = value.(uint32) + } + if !ok { + return d, fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + cs := C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) + } + + return d, nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID() error { + files, err := os.ReadDir(q.basePath) + if 
err != nil { + return fmt.Errorf("read directory failed : %s", q.basePath) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(q.basePath, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas.Store(path, projid) + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir, errno := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno) + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(home, &stat); err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink) + backingFsBlockDevTmp := backingFsBlockDev + ".tmp" + // Re-create just in case someone copied the home directory over to a new device + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + if !errors.Is(err, unix.EEXIST) { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + // On EEXIST, try again after unlinking any potential leftover. + _ = unix.Unlink(backingFsBlockDevTmp) + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + } + if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { + return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index 599cf095..9b552254 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -31,8 +31,9 @@ func init() { func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { d := &Driver{ name: "vfs", - homes: []string{home}, + home: home, idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + imageStore: options.ImageStore, } rootIDs := d.idMappings.RootPair() @@ -47,7 +48,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) key = strings.ToLower(key) switch key { case "vfs.imagestore", ".imagestore": - d.homes = append(d.homes, strings.Split(val, ",")...) + d.additionalHomes = append(d.additionalHomes, strings.Split(val, ",")...) continue case "vfs.mountopt": return nil, fmt.Errorf("vfs driver does not support mount options") @@ -62,12 +63,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) return nil, fmt.Errorf("vfs driver does not support %s options", key) } } - // If --imagestore is provided, lets add writable graphRoot - // to vfs's additional image store, as it is done for - // `overlay` driver. 
- if options.ImageStore != "" { - d.homes = append(d.homes, options.ImageStore) - } + d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d) d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater) @@ -80,11 +76,13 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver type Driver struct { name string - homes []string + home string + additionalHomes []string idMappings *idtools.IDMappings ignoreChownErrors bool naiveDiff graphdriver.DiffDriver updater graphdriver.LayerIDMapUpdater + imageStore string } func (d *Driver) String() string { @@ -158,7 +156,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool idMappings = opts.IDMappings } - dir := d.dir(id) + dir := d.dir2(id, ro) rootIDs := idMappings.RootPair() if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil { return err @@ -204,18 +202,32 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool return nil } -func (d *Driver) dir(id string) string { - for i, home := range d.homes { - if i > 0 { - home = filepath.Join(home, d.String()) +func (d *Driver) dir2(id string, useImageStore bool) string { + var homedir string + + if useImageStore && d.imageStore != "" { + homedir = filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id)) + } else { + homedir = filepath.Join(d.home, "dir", filepath.Base(id)) + } + if _, err := os.Stat(homedir); err != nil { + additionalHomes := d.additionalHomes[:] + if d.imageStore != "" { + additionalHomes = append(additionalHomes, d.imageStore) } - candidate := filepath.Join(home, "dir", filepath.Base(id)) - fi, err := os.Stat(candidate) - if err == nil && fi.IsDir() { - return candidate + for _, home := range additionalHomes { + candidate := filepath.Join(home, d.String(), "dir", filepath.Base(id)) + fi, err := os.Stat(candidate) + if err == nil && fi.IsDir() { + return candidate + } } } - return filepath.Join(d.homes[0], "dir", filepath.Base(id)) + return homedir +} + +func (d *Driver) dir(id string) string { + return d.dir2(id, false) } // Remove deletes the content from the directory for a given id. @@ -263,7 +275,7 @@ func (d *Driver) Exists(id string) bool { // List layers (not including additional image stores) func (d *Driver) ListLayers() ([]string, error) { - entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir")) + entries, err := os.ReadDir(filepath.Join(d.home, "dir")) if err != nil { return nil, err } @@ -285,8 +297,8 @@ func (d *Driver) ListLayers() ([]string, error) { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - if len(d.homes) > 1 { - return d.homes[1:] + if len(d.additionalHomes) > 0 { + return d.additionalHomes } return nil } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index d105e73f..f1325262 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -126,6 +126,13 @@ type Layer struct { // as a DiffID. UncompressedDigest digest.Digest `json:"diff-digest,omitempty"` + // TOCDigest represents the digest of the Table of Contents (TOC) of the blob. + // This digest is utilized when the UncompressedDigest is not + // validated during the partial image pull process, but the + // TOC itself is validated. + // It serves as an alternative reference under these specific conditions. 
+ TOCDigest digest.Digest `json:"toc-digest,omitempty"` + // UncompressedSize is the length of the blob that was last passed to // ApplyDiff() or create(), after we decompressed it. If // UncompressedDigest is not set, this should be treated as if it were @@ -174,6 +181,13 @@ type DiffOptions struct { Compression *archive.Compression } +// stagedLayerOptions are the options passed to .create to populate a staged +// layer +type stagedLayerOptions struct { + DiffOutput *drivers.DriverWithDifferOutput + DiffOptions *drivers.ApplyDiffWithDifferOpts +} + // roLayerStore wraps a graph driver, adding the ability to refer to layers by // name, and keeping track of parent-child relationships, along with a list of // all known layers. @@ -228,6 +242,10 @@ type roLayerStore interface { // specified uncompressed digest value recorded for them. LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + // LayersByTOCDigest returns a slice of the layers with the + // specified TOC digest value recorded for them. + LayersByTOCDigest(d digest.Digest) ([]Layer, error) + // Layers returns a slice of the known layers. Layers() ([]Layer, error) } @@ -256,7 +274,7 @@ type rwLayerStore interface { // underlying drivers do not themselves distinguish between writeable // and read-only layers. Returns the new layer structure and the size of the // diff which was applied to its parent to initialize its contents. - create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error) + create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) // updateNames modifies names associated with a layer based on (op, names). updateNames(id string, names []string, op updateNameOperation) error @@ -296,13 +314,13 @@ type rwLayerStore interface { // ApplyDiffWithDiffer applies the changes through the differ callback function. // If to is the empty string, then a staging directory is created by the driver. - ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) // CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors CleanupStagingDirectory(stagingDirectory string) error - // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff. - ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error + // applyDiffFromStagingDirectory uses diffOutput.Target to create the diff. + applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error // DifferTarget gets the location where files are stored for the layer.
DifferTarget(id string) (string, error) @@ -316,10 +334,71 @@ type rwLayerStore interface { GarbageCollect() error } +type multipleLockFile struct { + lockfiles []*lockfile.LockFile +} + +func (l multipleLockFile) Lock() { + for _, lock := range l.lockfiles { + lock.Lock() + } +} + +func (l multipleLockFile) RLock() { + for _, lock := range l.lockfiles { + lock.RLock() + } +} + +func (l multipleLockFile) Unlock() { + for _, lock := range l.lockfiles { + lock.Unlock() + } +} + +func (l multipleLockFile) ModifiedSince(lastWrite lockfile.LastWrite) (lockfile.LastWrite, bool, error) { + // Look up only the first lockfile, since this is the value returned by RecordWrite(). + return l.lockfiles[0].ModifiedSince(lastWrite) +} + +func (l multipleLockFile) AssertLockedForWriting() { + for _, lock := range l.lockfiles { + lock.AssertLockedForWriting() + } +} + +func (l multipleLockFile) GetLastWrite() (lockfile.LastWrite, error) { + return l.lockfiles[0].GetLastWrite() +} + +func (l multipleLockFile) RecordWrite() (lockfile.LastWrite, error) { + var lastWrite *lockfile.LastWrite + for _, lock := range l.lockfiles { + lw, err := lock.RecordWrite() + if err != nil { + return lw, err + } + // Return the first value we get so we know that + // all the locks have a write time >= to this one. + if lastWrite == nil { + lastWrite = &lw + } + } + return *lastWrite, nil +} + +func (l multipleLockFile) IsReadWrite() bool { + return l.lockfiles[0].IsReadWrite() +} + +func newMultipleLockFile(l ...*lockfile.LockFile) *multipleLockFile { + return &multipleLockFile{lockfiles: l} +} + type layerStore struct { // The following fields are only set when constructing layerStore, and must never be modified afterwards. // They are safe to access without any other locking. - lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores. + lockfile *multipleLockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores. mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held. rundir string jsonPath [numLayerLocationIndex]string @@ -337,6 +416,7 @@ type layerStore struct { bymount map[string]*Layer bycompressedsum map[digest.Digest][]string byuncompressedsum map[digest.Digest][]string + bytocsum map[digest.Digest][]string layerspathsModified [numLayerLocationIndex]time.Time // FIXME: This field is only set when constructing layerStore, but locking rules of the driver @@ -366,6 +446,7 @@ func copyLayer(l *Layer) *Layer { CompressedSize: l.CompressedSize, UncompressedDigest: l.UncompressedDigest, UncompressedSize: l.UncompressedSize, + TOCDigest: l.TOCDigest, CompressionType: l.CompressionType, ReadOnly: l.ReadOnly, volatileStore: l.volatileStore, @@ -745,6 +826,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { names := make(map[string]*Layer) compressedsums := make(map[digest.Digest][]string) uncompressedsums := make(map[digest.Digest][]string) + tocsums := make(map[digest.Digest][]string) var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them. 
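multipleLockFile above fans Lock/RLock/Unlock out to every underlying lock file, but deliberately answers ModifiedSince, GetLastWrite, and IsReadWrite from the first one; RecordWrite returns the first stamp it obtains, so every other lock is guaranteed a write time at least as new. A reduced stand-in (the real type is unexported) showing the layer store's new double-locking of layerdir and imagedir; the lock paths are placeholders:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/lockfile"
)

// multiLock is a cut-down analogue of the patch's multipleLockFile:
// Lock acquires every underlying lock, in order.
type multiLock struct{ locks []*lockfile.LockFile }

func (m multiLock) Lock() {
	for _, l := range m.locks {
		l.Lock()
	}
}

func (m multiLock) Unlock() {
	// Release in reverse; the patch unlocks in forward order, which is
	// equally safe for these independent files.
	for i := len(m.locks) - 1; i >= 0; i-- {
		m.locks[i].Unlock()
	}
}

func main() {
	a, err := lockfile.GetLockFile("/tmp/demo-layerdir-layers.lock")
	if err != nil {
		panic(err)
	}
	b, err := lockfile.GetLockFile("/tmp/demo-imagedir-layers.lock")
	if err != nil {
		panic(err)
	}
	m := multiLock{locks: []*lockfile.LockFile{a, b}}
	m.Lock()
	fmt.Println("holding both layers.lock files")
	m.Unlock()
}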
if r.lockfile.IsReadWrite() { selinux.ClearLabels() @@ -765,6 +847,9 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { if layer.UncompressedDigest != "" { uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID) } + if layer.TOCDigest != "" { + tocsums[layer.TOCDigest] = append(tocsums[layer.TOCDigest], layer.ID) + } if layer.MountLabel != "" { selinux.ReserveLabel(layer.MountLabel) } @@ -792,6 +877,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { r.byname = names r.bycompressedsum = compressedsums r.byuncompressedsum = uncompressedsums + r.bytocsum = tocsums // Load and merge information about which layers are mounted, and where. if r.lockfile.IsReadWrite() { @@ -998,22 +1084,37 @@ func (r *layerStore) saveMounts() error { return r.loadMounts() } -func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) { +func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.Driver, transient bool) (rwLayerStore, error) { if err := os.MkdirAll(rundir, 0o700); err != nil { return nil, err } if err := os.MkdirAll(layerdir, 0o700); err != nil { return nil, err } + if imagedir != "" { + if err := os.MkdirAll(imagedir, 0o700); err != nil { + return nil, err + } + } // Note: While the containers.lock file is in rundir for transient stores // we don't want to do this here, because the non-transient layers in // layers.json might be used externally as a read-only layer (using e.g. // additionalimagestores), and that would look for the lockfile in the // same directory + var lockFiles []*lockfile.LockFile lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock")) if err != nil { return nil, err } + lockFiles = append(lockFiles, lockFile) + if imagedir != "" { + lockFile, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock")) + if err != nil { + return nil, err + } + lockFiles = append(lockFiles, lockFile) + } + mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock")) if err != nil { return nil, err @@ -1023,7 +1124,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri volatileDir = rundir } rlstore := layerStore{ - lockfile: lockFile, + lockfile: newMultipleLockFile(lockFiles...), mountsLockfile: mountsLockfile, rundir: rundir, jsonPath: [numLayerLocationIndex]string{ @@ -1060,7 +1161,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL return nil, err } rlstore := layerStore{ - lockfile: lockfile, + lockfile: newMultipleLockFile(lockfile), mountsLockfile: nil, rundir: rundir, jsonPath: [numLayerLocationIndex]string{ @@ -1112,7 +1213,7 @@ func (r *layerStore) Size(name string) (int64, error) { // We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that // a zero value is not just present because it was never set to anything else (which can happen if the layer was // created by a version of this library that didn't keep track of digest and size information). 
- if layer.UncompressedDigest != "" { + if layer.TOCDigest != "" || layer.UncompressedDigest != "" { return layer.UncompressedSize, nil } return -1, nil @@ -1201,6 +1302,9 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s if layer.UncompressedDigest != "" { r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) } + if layer.TOCDigest != "" { + r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID) + } if err := r.saveFor(layer); err != nil { if e := r.Delete(layer.ID); e != nil { logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e) @@ -1211,7 +1315,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s } // Requires startWriting. -func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) { +func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) { if moreOptions == nil { moreOptions = &LayerOptions{} } @@ -1251,6 +1355,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount templateCompressedDigest digest.Digest templateCompressedSize int64 templateUncompressedDigest digest.Digest + templateTOCDigest digest.Digest templateUncompressedSize int64 templateCompressionType archive.Compression templateUIDs, templateGIDs []uint32 @@ -1263,6 +1368,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount } templateMetadata = templateLayer.Metadata templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap) + templateTOCDigest = templateLayer.TOCDigest templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize templateCompressionType = templateLayer.CompressionType @@ -1291,6 +1397,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount CompressedDigest: templateCompressedDigest, CompressedSize: templateCompressedSize, UncompressedDigest: templateUncompressedDigest, + TOCDigest: templateTOCDigest, UncompressedSize: templateUncompressedSize, CompressionType: templateCompressionType, UIDs: templateUIDs, @@ -1402,6 +1509,11 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount cleanupFailureContext = "applying layer diff" return nil, -1, err } + } else if slo != nil { + if err := r.applyDiffFromStagingDirectory(layer.ID, slo.DiffOutput, slo.DiffOptions); err != nil { + cleanupFailureContext = "applying staged directory diff" + return nil, -1, err + } } else { // applyDiffWithOptions() would have updated r.bycompressedsum // and r.byuncompressedsum for us, but if we used a template @@ -1413,6 +1525,9 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount if layer.UncompressedDigest != "" { r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) } + if layer.TOCDigest != "" { + r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID) + } } 
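The create() changes above add a third input path: besides a tar diff, a layer can now be populated from a previously staged diff via stagedLayerOptions, and every path keeps the new bytocsum index current (the staged path does so inside applyDiffFromStagingDirectory). A toy stand-in for the branch order; none of these types are the real unexported ones, which carry *drivers.DriverWithDifferOutput and *drivers.ApplyDiffWithDifferOpts:

package main

import "fmt"

// stagedLayerOptions is a placeholder for the patch's unexported struct.
type stagedLayerOptions struct{ TOCDigest string }

// populate mimics the branch order added to layerStore.create: a tar stream
// wins, then a staged diff, and otherwise only the digest indexes (including
// bytocsum) are filled in from template data.
func populate(diff []byte, slo *stagedLayerOptions, bytocsum map[string][]string, id string) {
	switch {
	case diff != nil:
		fmt.Println("applying tar diff")
	case slo != nil:
		fmt.Println("committing staged diff")
		bytocsum[slo.TOCDigest] = append(bytocsum[slo.TOCDigest], id)
	default:
		fmt.Println("no diff: indexing template digests only")
	}
}

func main() {
	bytocsum := map[string][]string{}
	populate(nil, &stagedLayerOptions{TOCDigest: "sha256:feed"}, bytocsum, "layer-1")
	fmt.Println(bytocsum) // map[sha256:feed:[layer-1]]
}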
delete(layer.Flags, incompleteFlag) @@ -2007,9 +2122,16 @@ func (s *simpleGetCloser) Close() error { // LOCKING BUG: See the comments in layerStore.Diff func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { if getter, ok := r.driver.(drivers.DiffGetterDriver); ok { - return getter.DiffGetter(id) + fgc, err := getter.DiffGetter(id) + if err != nil { + return nil, err + } + if fgc != nil { + return fgc, nil + } } - path, err := r.Mount(id, drivers.MountOpts{}) + + path, err := r.Mount(id, drivers.MountOpts{Options: []string{"ro"}}) if err != nil { return nil, err } @@ -2197,6 +2319,25 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) { return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) } +func updateDigestMap(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { + var newList []string + if oldvalue != "" { + for _, value := range (*m)[oldvalue] { + if value != id { + newList = append(newList, value) + } + } + if len(newList) > 0 { + (*m)[oldvalue] = newList + } else { + delete(*m, oldvalue) + } + } + if newvalue != "" { + (*m)[newvalue] = append((*m)[newvalue], id) + } +} + // Requires startWriting. func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { return r.applyDiffWithOptions(to, nil, diff) @@ -2233,7 +2374,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, if layerOptions != nil && layerOptions.UncompressedDigest != "" && layerOptions.UncompressedDigest.Algorithm() == digest.Canonical { uncompressedDigest = layerOptions.UncompressedDigest - } else { + } else if compression != archive.Uncompressed { uncompressedDigester = digest.Canonical.Digester() } @@ -2312,28 +2453,17 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, if uncompressedDigester != nil { uncompressedDigest = uncompressedDigester.Digest() } - - updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { - var newList []string - if oldvalue != "" { - for _, value := range (*m)[oldvalue] { - if value != id { - newList = append(newList, value) - } - } - if len(newList) > 0 { - (*m)[oldvalue] = newList - } else { - delete(*m, oldvalue) - } - } - if newvalue != "" { - (*m)[newvalue] = append((*m)[newvalue], id) - } + if uncompressedDigest == "" && compression == archive.Uncompressed { + uncompressedDigest = compressedDigest } + updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID) layer.CompressedDigest = compressedDigest - layer.CompressedSize = compressedCounter.Count + if layerOptions != nil && layerOptions.OriginalDigest != "" && layerOptions.OriginalSize != nil { + layer.CompressedSize = *layerOptions.OriginalSize + } else { + layer.CompressedSize = compressedCounter.Count + } updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID) layer.UncompressedDigest = uncompressedDigest layer.UncompressedSize = uncompressedCounter.Count @@ -2372,7 +2502,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) { } // Requires startWriting. 
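updateDigestMap, hoisted to package scope above so the staging-directory path can reuse it, moves a layer ID from one digest bucket to another and deletes buckets that become empty. A runnable toy demonstrating the effect; the function body is copied from the hunk, and the digest strings are fake placeholders:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func updateDigestMap(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
	var newList []string
	if oldvalue != "" {
		for _, value := range (*m)[oldvalue] {
			if value != id {
				newList = append(newList, value)
			}
		}
		if len(newList) > 0 {
			(*m)[oldvalue] = newList
		} else {
			delete(*m, oldvalue)
		}
	}
	if newvalue != "" {
		(*m)[newvalue] = append((*m)[newvalue], id)
	}
}

func main() {
	byDigest := map[digest.Digest][]string{"old": {"layer1", "layer2"}}
	// layer1's digest was recomputed: drop it from "old", add it to "new".
	updateDigestMap(&byDigest, "old", "new", "layer1")
	fmt.Println(byDigest) // map[new:[layer1] old:[layer2]]
}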
-func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { +func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { return ErrNotSupported @@ -2382,20 +2512,39 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, return ErrLayerUnknown } if options == nil { - options = &drivers.ApplyDiffOpts{ - Mappings: r.layerMappings(layer), - MountLabel: layer.MountLabel, + options = &drivers.ApplyDiffWithDifferOpts{ + ApplyDiffOpts: drivers.ApplyDiffOpts{ + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + }, + Flags: nil, } } - err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options) + + err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, diffOutput, options) if err != nil { return err } layer.UIDs = diffOutput.UIDs layer.GIDs = diffOutput.GIDs + updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, diffOutput.UncompressedDigest, layer.ID) layer.UncompressedDigest = diffOutput.UncompressedDigest + updateDigestMap(&r.bytocsum, diffOutput.TOCDigest, diffOutput.TOCDigest, layer.ID) + layer.TOCDigest = diffOutput.TOCDigest layer.UncompressedSize = diffOutput.Size layer.Metadata = diffOutput.Metadata + if options != nil && options.Flags != nil { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + for k, v := range options.Flags { + layer.Flags[k] = v + } + } + if err = r.saveFor(layer); err != nil { + return err + } + if len(diffOutput.TarSplit) != 0 { tsdata := bytes.Buffer{} compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) @@ -2425,14 +2574,11 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, return err } } - if err = r.saveFor(layer); err != nil { - return err - } return err } // Requires startWriting. -func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { +func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { return nil, ErrNotSupported @@ -2448,9 +2594,11 @@ func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOp return nil, ErrLayerUnknown } if options == nil { - options = &drivers.ApplyDiffOpts{ - Mappings: r.layerMappings(layer), - MountLabel: layer.MountLabel, + options = &drivers.ApplyDiffWithDifferOpts{ + ApplyDiffOpts: drivers.ApplyDiffOpts{ + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + }, } } output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ) @@ -2494,6 +2642,11 @@ func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error return r.layersByDigestMap(r.byuncompressedsum, d) } +// Requires startReading or startWriting. 
+func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) { + return r.layersByDigestMap(r.bytocsum, d) +} + func closeAll(closes ...func() error) (rErr error) { for _, f := range closes { if err := f(); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 05d25711..70f76d66 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -339,12 +339,43 @@ func (compression *Compression) Extension() string { return "" } +// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to +// prevent tar.FileInfoHeader from introspecting it and potentially calling into +// glibc. +type nosysFileInfo struct { + os.FileInfo +} + +func (fi nosysFileInfo) Sys() interface{} { + // A Sys value of type *tar.Header is safe as it is system-independent. + // The tar.FileInfoHeader function copies the fields into the returned + // header without performing any OS lookups. + if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { + return sys + } + return nil +} + +// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi. +var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error + +func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + if sysStatOverride == nil { + return tar.FileInfoHeader(fi, link) + } + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + return hdr, sysStatOverride(fi, hdr) +} + // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) + hdr, err := fileInfoHeaderNoLookups(fi, link) if err != nil { return nil, err } @@ -385,7 +416,7 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error { return err } for _, key := range xattrs { - if strings.HasPrefix(key, "user.") { + if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") { value, err := system.Lgetxattr(path, key) if err != nil { if errors.Is(err, system.E2BIG) { @@ -477,7 +508,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { } } if fi.Mode()&os.ModeSocket != 0 { - logrus.Warnf("archive: skipping %q since it is a socket", path) + logrus.Infof("archive: skipping %q since it is a socket", path) return nil } @@ -534,6 +565,10 @@ func (ta *tarAppender) addTarFile(path, name string) error { if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID + // Don’t expose the user names from the local system; they probably don’t match the ta.ChownOpts value anyway, + // and they unnecessarily give recipients of the tar file potentially private data. 
+ hdr.Uname = "" + hdr.Gname = "" } maybeTruncateHeaderModTime(hdr) diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index 88192f22..c6811031 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -15,6 +15,31 @@ import ( "golang.org/x/sys/unix" ) +func init() { + sysStatOverride = statUnix +} + +// statUnix populates hdr from system-dependent fields of fi without performing +// any OS lookups. +// Adapted from Moby. +func statUnix(fi os.FileInfo, hdr *tar.Header) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert + } + + return nil +} + // fixVolumePathPrefix does platform specific processing to ensure that if // the path being passed in is not in a volume path format, convert it to one. func fixVolumePathPrefix(srcPath string) string { diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 5d4befc2..1e3ad86d 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -25,7 +25,7 @@ import ( const ( cacheKey = "chunked-manifest-cache" - cacheVersion = 1 + cacheVersion = 2 digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) @@ -207,9 +207,9 @@ func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) { return string(digester.Digest()), nil } -// generateFileLocation generates a file location in the form $OFFSET@$PATH -func generateFileLocation(path string, offset uint64) []byte { - return []byte(fmt.Sprintf("%d@%s", offset, path)) +// generateFileLocation generates a file location in the form $OFFSET:$LEN:$PATH +func generateFileLocation(path string, offset, len uint64) []byte { + return []byte(fmt.Sprintf("%d:%d:%s", offset, len, path)) } // generateTag generates a tag in the form $DIGEST$OFFSET@LEN. 
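Note the on-disk cache change above: file locations move from $OFFSET@$PATH to $OFFSET:$LEN:$PATH, and cacheVersion is bumped to 2 so caches written in the old format are discarded and regenerated rather than misparsed. The next hunk decodes the new form with a three-way SplitN so that a ':' inside the path survives; a standalone sketch of that parse (the helper name is hypothetical):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseLocation decodes a v2 cache location of the form "$OFFSET:$LEN:$PATH".
func parseLocation(loc string) (offset, length int64, path string, err error) {
	parts := strings.SplitN(loc, ":", 3)
	if len(parts) != 3 {
		return 0, 0, "", fmt.Errorf("invalid location %q", loc)
	}
	if offset, err = strconv.ParseInt(parts[0], 10, 64); err != nil {
		return 0, 0, "", err
	}
	if length, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
		return 0, 0, "", err
	}
	return offset, length, parts[2], nil
}

func main() {
	fmt.Println(parseLocation("0:1234:/usr/bin/bash")) // 0 1234 /usr/bin/bash <nil>
}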
@@ -245,7 +245,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin var tags []string for _, k := range toc { if k.Digest != "" { - location := generateFileLocation(k.Name, 0) + location := generateFileLocation(k.Name, 0, uint64(k.Size)) off := uint64(vdata.Len()) l := uint64(len(location)) @@ -276,7 +276,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin digestLen = len(k.Digest) } if k.ChunkDigest != "" { - location := generateFileLocation(k.Name, uint64(k.ChunkOffset)) + location := generateFileLocation(k.Name, uint64(k.ChunkOffset), uint64(k.ChunkSize)) off := uint64(vdata.Len()) l := uint64(len(location)) d := generateTag(k.ChunkDigest, off, l) @@ -490,7 +490,9 @@ func findTag(digest string, metadata *metadata) (string, uint64, uint64) { if digest == d { startOff := i*metadata.tagLen + metadata.digestLen parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + off, _ := strconv.ParseInt(parts[0], 10, 64) + len, _ := strconv.ParseInt(parts[1], 10, 64) return digest, uint64(off), uint64(len) } @@ -507,12 +509,16 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64, defer c.mutex.RUnlock() for _, layer := range c.layers { - digest, off, len := findTag(digest, layer.metadata) + digest, off, tagLen := findTag(digest, layer.metadata) if digest != "" { - position := string(layer.metadata.vdata[off : off+len]) - parts := strings.SplitN(position, "@", 2) + position := string(layer.metadata.vdata[off : off+tagLen]) + parts := strings.SplitN(position, ":", 3) + if len(parts) != 3 { + continue + } offFile, _ := strconv.ParseInt(parts[0], 10, 64) - return layer.target, parts[1], offFile, nil + // parts[1] is the chunk length, currently unused. + return layer.target, parts[2], offFile, nil } } @@ -578,7 +584,10 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { return byteSliceAsString(buf.Bytes()[from:to]) } - iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) + pool := iter.Pool() + pool.ReturnIterator(iter) + iter = pool.BorrowIterator(manifest) + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { if strings.ToLower(field) == "version" { toc.Version = iter.ReadInt() @@ -657,8 +666,17 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { } toc.Entries = append(toc.Entries, m) } - break } + + // validate there is no extra data in the provided input. This is a security measure to avoid + // that the digest we calculate for the TOC refers to the entire document. 
+ if iter.Error != nil && iter.Error != io.EOF { + return nil, iter.Error + } + if iter.WhatIsNext() != jsoniter.InvalidValue || !errors.Is(iter.Error, io.EOF) { + return nil, fmt.Errorf("unexpected data after manifest") + } + toc.StringsBuf = buf return &toc, nil } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 1d8141e3..112ca2c7 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -257,8 +257,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann return decodedBlob, decodedTarSplit, int64(footerData.Offset), err } -func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) { - d, err := digest.Parse(expectedUncompressedChecksum) +func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) { + d, err := digest.Parse(expectedCompressedChecksum) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go index ca7ce30f..a2fd904c 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go @@ -420,6 +420,14 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r zstdWriter.Close() return err } + + // make sure the entire tarball is flushed to the output as it might contain + // some trailing zeros that affect the checksum. + if _, err := io.Copy(zstdWriter, its); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Flush(); err != nil { zstdWriter.Close() return err @@ -452,12 +460,12 @@ type zstdChunkedWriter struct { } func (w zstdChunkedWriter) Close() error { - err := <-w.tarSplitErr - if err != nil { - w.tarSplitOut.Close() + errClose := w.tarSplitOut.Close() + + if err := <-w.tarSplitErr; err != nil && err != io.EOF { return err } - return w.tarSplitOut.Close() + return errClose } func (w zstdChunkedWriter) Write(p []byte) (int, error) { diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go index a0892803..d3c105c4 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -4,6 +4,7 @@ import ( "bufio" "fmt" "io" + "path/filepath" "strings" "time" "unicode" @@ -93,13 +94,18 @@ func getStMode(mode uint32, typ string) (uint32, error) { return mode, nil } -func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { - path := entry.Name - if path == "" { +func sanitizeName(name string) string { + path := filepath.Clean(name) + if path == "." 
{ path = "/" } else if path[0] != '/' { path = "/" + path } + return path +} + +func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { + path := sanitizeName(entry.Name) if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil { return err @@ -133,9 +139,10 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri var payload string if entry.Linkname != "" { - payload = entry.Linkname - if entry.Type == internal.TypeLink && payload[0] != '/' { - payload = "/" + payload + if entry.Type == internal.TypeSymlink { + payload = entry.Linkname + } else { + payload = sanitizeName(entry.Linkname) } } else { if len(entry.Digest) > 10 { @@ -198,10 +205,13 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, if e.Linkname == "" { continue } + if e.Type == internal.TypeSymlink { + continue + } links[e.Linkname] = links[e.Linkname] + 1 } - if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") { + if len(toc.Entries) == 0 || (sanitizeName(toc.Entries[0].Name) != "/") { root := &internal.FileMetadata{ Name: "/", Type: internal.TypeDir, diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index 8493a2c1..f300df34 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -25,6 +25,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chunked/compressor" "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" @@ -40,12 +41,14 @@ import ( const ( maxNumberMissingChunks = 1024 + autoMergePartsThreshold = 128 // if the gap between two ranges is below this threshold, automatically merge them. newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY) containersOverrideXattr = "user.containers.override_stat" bigDataKey = "zstd-chunked-manifest" chunkedData = "zstd-chunked-data" chunkedLayerDataKey = "zstd-chunked-layer-data" tocKey = "toc" + fsVerityDigestsKey = "fs-verity-digests" fileTypeZstdChunked = iota fileTypeEstargz @@ -71,11 +74,9 @@ type chunkedDiffer struct { zstdReader *zstd.Decoder rawReader io.Reader - // contentDigest is the digest of the uncompressed content - // (diffID) when the layer is fully retrieved. If the layer - // is not fully retrieved, instead of using the digest of the - // uncompressed content, it refers to the digest of the TOC. - contentDigest digest.Digest + // tocDigest is the digest of the TOC document when the layer + // is partially pulled. + tocDigest digest.Digest // convertedToZstdChunked is set to true if the layer needs to // be converted to the zstd:chunked format before it can be @@ -86,9 +87,18 @@ type chunkedDiffer struct { // the layer are trusted and should not be validated. skipValidation bool + // blobDigest is the digest of the whole compressed layer. It is used if + // convertToZstdChunked to validate a layer when it is converted since there + // is no TOC referenced by the manifest. 
+ blobDigest digest.Digest + blobSize int64 storeOpts *types.StoreOptions + + useFsVerity graphdriver.DifferFsVerity + fsVerityDigests map[string]string + fsVerityMutex sync.Mutex } var xattrsToIgnore = map[string]interface{}{ @@ -188,33 +198,7 @@ func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, return streams, errs, nil } -func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSourceSeekable) (*seekableFile, digest.Digest, map[string]string, error) { - var payload io.ReadCloser - var streams chan io.ReadCloser - var errs chan error - var err error - - chunksToRequest := []ImageSourceChunk{ - { - Offset: 0, - Length: uint64(blobSize), - }, - } - - streams, errs, err = iss.GetBlobAt(chunksToRequest) - if err != nil { - return nil, "", nil, err - } - select { - case p := <-streams: - payload = p - case err := <-errs: - return nil, "", nil, err - } - if payload == nil { - return nil, "", nil, errors.New("invalid stream returned") - } - +func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableFile, digest.Digest, map[string]string, error) { diff, err := archive.DecompressStream(payload) if err != nil { return nil, "", nil, err } @@ -235,10 +219,8 @@ func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSour return nil, "", nil, err } - digester := digest.Canonical.Digester() - hash := digester.Hash() - - if _, err := io.Copy(io.MultiWriter(chunked, hash), diff); err != nil { + convertedOutputDigester := digest.Canonical.Digester() + if _, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff); err != nil { f.Close() return nil, "", nil, err } @@ -249,27 +231,39 @@ func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSour is := seekableFile{ file: f, } - return &is, digester.Digest(), newAnnotations, nil + + return &is, convertedOutputDigester.Digest(), newAnnotations, nil } // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
-func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { - storeOpts, err := types.DefaultStoreOptionsAutoDetectUID() +func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { + storeOpts, err := types.DefaultStoreOptions() if err != nil { return nil, err } - if _, ok := annotations[internal.ManifestChecksumKey]; ok { + if !parseBooleanPullOption(&storeOpts, "enable_partial_images", true) { + return nil, errors.New("enable_partial_images not configured") + } + + _, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey] + _, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation] + + if hasZstdChunkedTOC && hasEstargzTOC { + return nil, errors.New("both zstd:chunked and eStargz TOC found") + } + + if hasZstdChunkedTOC { return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) } - if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok { + if hasEstargzTOC { return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) } - return makeConvertFromRawDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) + return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts) } -func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { +func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { if !parseBooleanPullOption(storeOpts, "convert_images", false) { return nil, errors.New("convert_images not configured") } @@ -280,6 +274,8 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize } return &chunkedDiffer{ + fsVerityDigests: make(map[string]string), + blobDigest: blobDigest, blobSize: blobSize, convertToZstdChunked: true, copyBuffer: makeCopyBuffer(), @@ -299,22 +295,23 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in return nil, err } - contentDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) + tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) if err != nil { return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err) } return &chunkedDiffer{ - blobSize: blobSize, - contentDigest: contentDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeZstdChunked, - layersCache: layersCache, - manifest: manifest, - storeOpts: storeOpts, - stream: iss, - tarSplit: tarSplit, - tocOffset: tocOffset, + fsVerityDigests: make(map[string]string), + blobSize: blobSize, + tocDigest: tocDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeZstdChunked, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tarSplit: tarSplit, + tocOffset: tocOffset, }, nil } @@ -328,21 +325,22 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize return nil, err } - contentDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) if err != nil { return nil, fmt.Errorf("parse TOC digest %q: 
%w", annotations[estargz.TOCJSONDigestAnnotation], err) } return &chunkedDiffer{ - blobSize: blobSize, - contentDigest: contentDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeEstargz, - layersCache: layersCache, - manifest: manifest, - storeOpts: storeOpts, - stream: iss, - tocOffset: tocOffset, + fsVerityDigests: make(map[string]string), + blobSize: blobSize, + tocDigest: tocDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeEstargz, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tocOffset: tocOffset, }, nil } @@ -939,6 +937,8 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT return nil } +type recordFsVerityFunc func(string, *os.File) error + type destinationFile struct { digester digest.Digester dirfd int @@ -948,9 +948,10 @@ type destinationFile struct { options *archive.TarOptions skipValidation bool to io.Writer + recordFsVerity recordFsVerityFunc } -func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool) (*destinationFile, error) { +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) { file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) if err != nil { return nil, err @@ -977,15 +978,32 @@ func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *ar options: options, dirfd: dirfd, skipValidation: skipValidation, + recordFsVerity: recordFsVerity, }, nil } func (d *destinationFile) Close() (Err error) { defer func() { - err := d.file.Close() + var roFile *os.File + var err error + + if d.recordFsVerity != nil { + roFile, err = reopenFileReadOnly(d.file) + if err == nil { + defer roFile.Close() + } else if Err == nil { + Err = err + } + } + + err = d.file.Close() if Err == nil { Err = err } + + if Err == nil && roFile != nil { + Err = d.recordFsVerity(d.metadata.Name, roFile) + } }() if !d.skipValidation { @@ -1008,6 +1026,35 @@ func closeDestinationFiles(files chan *destinationFile, errors chan error) { close(errors) } +func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error { + if c.useFsVerity == graphdriver.DifferFsVerityDisabled { + return nil + } + // fsverity.EnableVerity doesn't return an error if fs-verity was already + // enabled on the file. + err := fsverity.EnableVerity(path, int(roFile.Fd())) + if err != nil { + if c.useFsVerity == graphdriver.DifferFsVerityRequired { + return err + } + + // If it is not required, ignore the error if the filesystem does not support it. 
+ if errors.Is(err, unix.ENOTSUP) || errors.Is(err, unix.ENOTTY) { + return nil + } + } + verity, err := fsverity.MeasureVerity(path, int(roFile.Fd())) + if err != nil { + return err + } + + c.fsVerityMutex.Lock() + c.fsVerityDigests[path] = verity + c.fsVerityMutex.Unlock() + + return nil +} + func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { var destFile *destinationFile @@ -1095,7 +1142,11 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan } filesToClose <- destFile } - destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation) + recordFsVerity := c.recordFsVerity + if c.useFsVerity == graphdriver.DifferFsVerityDisabled { + recordFsVerity = nil + } + destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation, recordFsVerity) if err != nil { Err = err goto exit @@ -1130,22 +1181,12 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan } func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { - getGap := func(missingParts []missingPart, i int) int { + getGap := func(missingParts []missingPart, i int) uint64 { prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length - return int(missingParts[i].SourceChunk.Offset - prev) - } - getCost := func(missingParts []missingPart, i int) int { - cost := getGap(missingParts, i) - if missingParts[i-1].OriginFile != nil { - cost += int(missingParts[i-1].SourceChunk.Length) - } - if missingParts[i].OriginFile != nil { - cost += int(missingParts[i].SourceChunk.Length) - } - return cost + return missingParts[i].SourceChunk.Offset - prev } - // simple case: merge chunks from the same file. + // simple case: merge chunks from the same file. Useful to reduce the number of parts to work with later. newMissingParts := missingParts[0:1] prevIndex := 0 for i := 1; i < len(missingParts); i++ { @@ -1165,28 +1206,50 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { } missingParts = newMissingParts - if len(missingParts) <= target { - return missingParts - } - - // this implementation doesn't account for duplicates, so it could merge - // more than necessary to reach the specified target. Since target itself - // is a heuristic value, it doesn't matter. 
- costs := make([]int, len(missingParts)-1) - for i := 1; i < len(missingParts); i++ { - costs[i-1] = getCost(missingParts, i) + type gap struct { + from int + to int + cost uint64 + } + var requestGaps []gap + lastOffset := int(-1) + numberSourceChunks := 0 + for i, c := range missingParts { + if c.OriginFile != nil || c.Hole { + // it does not require a network request + continue + } + numberSourceChunks++ + if lastOffset >= 0 { + prevEnd := missingParts[lastOffset].SourceChunk.Offset + missingParts[lastOffset].SourceChunk.Length + cost := c.SourceChunk.Offset - prevEnd + g := gap{ + from: lastOffset, + to: i, + cost: cost, + } + requestGaps = append(requestGaps, g) + } + lastOffset = i } - sort.Ints(costs) - - toShrink := len(missingParts) - target - if toShrink >= len(costs) { - toShrink = len(costs) - 1 + sort.Slice(requestGaps, func(i, j int) bool { + return requestGaps[i].cost < requestGaps[j].cost + }) + toMergeMap := make([]bool, len(missingParts)) + remainingToMerge := numberSourceChunks - target + for _, g := range requestGaps { + if remainingToMerge < 0 && g.cost > autoMergePartsThreshold { + continue + } + for i := g.from + 1; i <= g.to; i++ { + toMergeMap[i] = true + } + remainingToMerge-- } - targetValue := costs[toShrink] newMissingParts = missingParts[0:1] for i := 1; i < len(missingParts); i++ { - if getCost(missingParts, i) > targetValue { + if !toMergeMap[i] { newMissingParts = append(newMissingParts, missingParts[i]) } else { gap := getGap(missingParts, i) @@ -1218,6 +1281,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st } } + missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) calculateChunksToRequest() // There are some missing files. Prepare a multirange request for the missing chunks. @@ -1231,14 +1295,13 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st } if _, ok := err.(ErrBadRequest); ok { - requested := len(missingParts) // If the server cannot handle at least 64 chunks in a single request, just give up. 
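The rewritten mergeMissingChunks above replaces the old cost sort with an explicit list of gaps between consecutive network-fetched chunks: gaps are sorted by cost (bytes that would be downloaded and thrown away), the cheapest are merged until the part count reaches the target, and gaps at or below autoMergePartsThreshold (128 bytes) are merged even beyond that, since a few wasted bytes beat an extra range in the multirange request. A simplified, runnable illustration of the policy; this is not the code above, and OriginFile/Hole parts are ignored:

package main

import "fmt"

const autoMergePartsThreshold = 128

func main() {
	// Wanted ranges in the blob: [0,100), [150,200), [1000,1100).
	// Gaps between consecutive ranges, already sorted by cost: 50 and 800.
	gaps := []uint64{50, 800}
	target := 3                         // allowed number of range requests
	remaining := len(gaps) + 1 - target // merges still needed to hit target
	for i, cost := range gaps {
		merge := remaining > 0 || cost <= autoMergePartsThreshold
		if merge {
			remaining--
		}
		fmt.Printf("gap %d (%d bytes): merge=%v\n", i, cost, merge)
	}
	// gap 0 merges (50 <= 128); gap 1 stays a separate range request.
}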
- if requested < 64 { + if len(chunksToRequest) < 64 { return err } // Merge more chunks to request - missingParts = mergeMissingChunks(missingParts, requested/2) + missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2) calculateChunksToRequest() continue } @@ -1426,15 +1489,39 @@ type findAndCopyFileOptions struct { options *archive.TarOptions } +func reopenFileReadOnly(f *os.File) (*os.File, error) { + path := fmt.Sprintf("/proc/self/fd/%d", f.Fd()) + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), f.Name()), nil +} + func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { finalizeFile := func(dstFile *os.File) error { - if dstFile != nil { - defer dstFile.Close() - if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { - return err - } + if dstFile == nil { + return nil } - return nil + err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false) + if err != nil { + dstFile.Close() + return err + } + var roFile *os.File + if c.useFsVerity != graphdriver.DifferFsVerityDisabled { + roFile, err = reopenFileReadOnly(dstFile) + } + dstFile.Close() + if err != nil { + return err + } + if roFile == nil { + return nil + } + + defer roFile.Close() + return c.recordFsVerity(r.Name, roFile) } found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) @@ -1491,6 +1578,43 @@ func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMeta return new, nil } +func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) { + var payload io.ReadCloser + var streams chan io.ReadCloser + var errs chan error + var err error + + chunksToRequest := []ImageSourceChunk{ + { + Offset: 0, + Length: uint64(c.blobSize), + }, + } + + streams, errs, err = c.stream.GetBlobAt(chunksToRequest) + if err != nil { + return "", err + } + select { + case p := <-streams: + payload = p + case err := <-errs: + return "", err + } + if payload == nil { + return "", errors.New("invalid stream returned") + } + + originalRawDigester := digest.Canonical.Digester() + + r := io.TeeReader(payload, originalRawDigester.Hash()) + + // copy the entire tarball and compute its digest + _, err = io.Copy(destination, r) + + return originalRawDigester.Digest(), err +} + func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) { defer c.layersCache.release() defer func() { @@ -1499,11 +1623,40 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } }() + c.useFsVerity = differOpts.UseFsVerity + // stream to use for reading the zstd:chunked or Estargz file. stream := c.stream + var uncompressedDigest digest.Digest + if c.convertToZstdChunked { - fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, c.blobSize, c.stream) + fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + blobFile := os.NewFile(uintptr(fd), "blob-file") + defer func() { + if blobFile != nil { + blobFile.Close() + } + }() + + // calculate the checksum before accessing the file. 
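Two Linux file-descriptor idioms carry the conversion path that follows: O_TMPFILE creates an anonymous file that vanishes when its last descriptor is closed (used for the downloaded blob), and reopening through /proc/self/fd produces a fresh read-only description of a file held open read-write, which fs-verity requires before enabling or measuring. A minimal sketch, assuming /var/tmp sits on a filesystem that supports O_TMPFILE:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Anonymous file: no name on disk, deleted automatically on close.
	fd, err := unix.Open("/var/tmp", unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
	if err != nil {
		panic(err)
	}
	f := os.NewFile(uintptr(fd), "blob-file")
	defer f.Close()
	if _, err := f.WriteString("scratch"); err != nil {
		panic(err)
	}

	// Fresh, independent read-only descriptor for the same inode.
	ro, err := os.Open(fmt.Sprintf("/proc/self/fd/%d", f.Fd()))
	if err != nil {
		panic(err)
	}
	defer ro.Close()
}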
+ compressedDigest, err := c.copyAllBlobToFile(blobFile) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + + if compressedDigest != c.blobDigest { + return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("invalid digest to convert: expected %q, got %q", c.blobDigest, compressedDigest) + } + + if _, err := blobFile.Seek(0, io.SeekStart); err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + + fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile) if err != nil { return graphdriver.DriverWithDifferOutput{}, err } @@ -1511,6 +1664,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff // need to keep it open until the entire file is processed. defer fileSource.Close() + // Close the file so that the file descriptor is released and the file is deleted. + blobFile.Close() + blobFile = nil + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations) if err != nil { return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err) @@ -1523,12 +1680,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff c.fileType = fileTypeZstdChunked c.manifest = manifest c.tarSplit = tarSplit - // since we retrieved the whole file and it was validated, use the diffID instead of the TOC digest. - c.contentDigest = diffID c.tocOffset = tocOffset // the file was generated by us and the digest for each file was already computed, no need to validate it again. c.skipValidation = true + // since we retrieved the whole file and it was validated, set the uncompressed digest. + uncompressedDigest = diffID } lcd := chunkedLayerData{ @@ -1557,11 +1714,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff Artifacts: map[string]interface{}{ tocKey: toc, }, - TOCDigest: c.contentDigest, - } - - if !parseBooleanPullOption(c.storeOpts, "enable_partial_images", false) { - return output, errors.New("enable_partial_images not configured") + TOCDigest: c.tocDigest, + UncompressedDigest: uncompressedDigest, } // When the hard links deduplication is used, file attributes are ignored because setting them @@ -1678,13 +1832,17 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff mode := os.FileMode(r.Mode) - r.Name = filepath.Clean(r.Name) - r.Linkname = filepath.Clean(r.Linkname) - t, err := typeToTarType(r.Type) if err != nil { return output, err } + + r.Name = filepath.Clean(r.Name) + // do not modify the value of symlinks + if r.Linkname != "" && t != tar.TypeSymlink { + r.Linkname = filepath.Clean(r.Linkname) + } + if whiteoutConverter != nil { hdr := archivetar.Header{ Typeflag: t, @@ -1730,6 +1888,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } case tar.TypeDir: + if r.Name == "" || r.Name == "." { + output.RootDirMode = &mode + } if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil { return output, err } @@ -1851,7 +2012,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } // There are some missing files. Prepare a multirange request for the missing chunks. 
if len(missingParts) > 0 { - missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil { return output, err } @@ -1867,6 +2027,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize)) } + output.Artifacts[fsVerityDigestsKey] = c.fsVerityDigests + return output, nil } @@ -1926,7 +2088,10 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i e.Chunks = make([]*internal.FileMetadata, nChunks+1) for j := 0; j <= nChunks; j++ { - e.Chunks[j] = &entries[i+j] + // we need a copy here, otherwise we override the + // .Size later + copy := entries[i+j] + e.Chunks[j] = &copy e.EndOffset = entries[i+j].EndOffset } i += nChunks diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go index 8d3fcf2b..ac6bdfec 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go @@ -9,9 +9,10 @@ import ( storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" + digest "github.com/opencontainers/go-digest" ) // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer. -func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { +func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { return nil, errors.New("format not supported on this system") } diff --git a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go new file mode 100644 index 00000000..6fbaa41b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go @@ -0,0 +1,41 @@ +package toc + +import ( + "errors" + + "github.com/containers/storage/pkg/chunked/internal" + digest "github.com/opencontainers/go-digest" +) + +// tocJSONDigestAnnotation is the annotation key for the digest of the estargz +// TOC JSON. +// It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation +// Duplicate it here to avoid a dependency on the package. +const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" + +// GetTOCDigest returns the digest of the TOC as recorded in the annotations. +// This function retrieves a digest that represents the content of a +// table of contents (TOC) from the image's annotations. +// This is an experimental feature and may be changed/removed in the future.
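The mergeTocEntries change above fixes a Go aliasing pitfall: &entries[i+j] stores a pointer into the slice's backing array, so a later in-place update of the entry's Size would silently rewrite every chunk that aliases it; copying first breaks the link. A minimal reproduction:

package main

import "fmt"

type fileMetadata struct{ Size int64 }

func main() {
	entries := []fileMetadata{{Size: 1}, {Size: 2}}

	alias := &entries[0] // points into the backing array
	copied := entries[0] // independent value
	entries[0].Size = 99 // later mutation, as happens to .Size above

	fmt.Println(alias.Size, copied.Size) // 99 1: only the copy is immune
}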
+func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { + d1, ok1 := annotations[tocJSONDigestAnnotation] + d2, ok2 := annotations[internal.ManifestChecksumKey] + switch { + case ok1 && ok2: + return nil, errors.New("both zstd:chunked and eStargz TOC found") + case ok1: + d, err := digest.Parse(d1) + if err != nil { + return nil, err + } + return &d, nil + case ok2: + d, err := digest.Parse(d2) + if err != nil { + return nil, err + } + return &d, nil + default: + return nil, nil + } +} diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index 20d72ca8..febe8a0c 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -97,6 +97,8 @@ type OverlayOptionsConfig struct { Inodes string `toml:"inodes,omitempty"` // Do not create a bind mount on the storage home SkipMountHome string `toml:"skip_mount_home,omitempty"` + // Specify whether composefs must be used to mount the data layers + UseComposefs string `toml:"use_composefs,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories ForceMask string `toml:"force_mask,omitempty"` @@ -147,6 +149,9 @@ type OptionsConfig struct { // ignored when building an image. IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` + // Specify whether composefs must be used to mount the data layers + UseComposefs string `toml:"use_composefs,omitempty"` + // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories. ForceMask os.FileMode `toml:"force_mask,omitempty"` @@ -283,6 +288,7 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { } case "overlay", "overlay2": + // Specify whether composefs must be used to mount the data layers if options.Overlay.IgnoreChownErrors != "" { doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors)) } else if options.IgnoreChownErrors != "" { @@ -316,6 +322,9 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { } else if options.ForceMask != 0 { doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask)) } + if options.Overlay.UseComposefs != "" { + doptions = append(doptions, fmt.Sprintf("%s.use_composefs=%s", driverName, options.Overlay.UseComposefs)) + } case "vfs": if options.Vfs.IgnoreChownErrors != "" { doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors)) diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go new file mode 100644 index 00000000..5b21c4b7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go @@ -0,0 +1,45 @@ +package fsverity + +import ( + "errors" + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// verityDigest struct represents the digest used for verifying the integrity of a file. +type verityDigest struct { + Fsv unix.FsverityDigest + Buf [64]byte +} + +// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened +// in read-only mode. +// The 'description' parameter is a human-readable description of the file. 
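A hedged sketch of how a consumer might call the GetTOCDigest helper above; the annotation key is the value of internal.ManifestChecksumKey spelled out for the example, and the digest is the empty-sha256 placeholder this diff already uses elsewhere:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/chunked/toc"
)

func main() {
	// Layer annotations as they would arrive from the image manifest.
	annotations := map[string]string{
		"io.github.containers.zstd-chunked.manifest-checksum": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
	}
	d, err := toc.GetTOCDigest(annotations)
	if err != nil {
		panic(err) // e.g. both zstd:chunked and eStargz annotations set
	}
	if d == nil {
		fmt.Println("no TOC recorded; the layer must be pulled in full")
		return
	}
	fmt.Println("TOC digest:", *d)
}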
+func EnableVerity(description string, fd int) error { + enableArg := unix.FsverityEnableArg{ + Version: 1, + Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256, + Block_size: 4096, + } + + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg))) + if e1 != 0 && !errors.Is(e1, unix.EEXIST) { + return fmt.Errorf("failed to enable verity for %q: %w", description, e1) + } + return nil +} + +// MeasureVerity measures and returns the verity digest for the file represented by 'fd'. +// The 'description' parameter is a human-readable description of the file. +func MeasureVerity(description string, fd int) (string, error) { + var digest verityDigest + digest.Fsv.Size = 64 + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) + if e1 != 0 { + return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) + } + return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil +} diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go new file mode 100644 index 00000000..46e68c57 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go @@ -0,0 +1,21 @@ +//go:build !linux +// +build !linux + +package fsverity + +import ( + "fmt" +) + +// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened +// in read-only mode. +// The 'description' parameter is a human-readable description of the file. +func EnableVerity(description string, fd int) error { + return fmt.Errorf("fs-verity is not supported on this platform") +} + +// MeasureVerity measures and returns the verity digest for the file represented by 'fd'. +// The 'description' parameter is a human-readable description of the file. +func MeasureVerity(description string, fd int) (string, error) { + return "", fmt.Errorf("fs-verity is not supported on this platform") +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go index 85c5e76c..7eb63b67 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -6,21 +6,6 @@ import ( "path/filepath" ) -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} - // GetDataHome returns XDG_DATA_HOME. // GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. 
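EnableVerity and MeasureVerity are meant to be called back to back on a read-only descriptor, as recordFsVerity does earlier in this diff. A usage sketch with a hypothetical path:

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/fsverity"
)

func main() {
	f, err := os.Open("/path/to/extracted/file") // read-only descriptor
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := fsverity.EnableVerity(f.Name(), int(f.Fd())); err != nil {
		panic(err) // fails where the filesystem lacks fs-verity support
	}
	d, err := fsverity.MeasureVerity(f.Name(), int(f.Fd()))
	if err != nil {
		panic(err)
	}
	fmt.Println("fs-verity digest:", d)
}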
// diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index 0883ee02..9057fe1b 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -1,5 +1,5 @@ -//go:build !linux && !darwin && !freebsd -// +build !linux,!darwin,!freebsd +//go:build !linux && !darwin && !freebsd && !windows +// +build !linux,!darwin,!freebsd,!windows package homedir @@ -8,6 +8,8 @@ package homedir import ( "errors" + "os" + "path/filepath" ) // GetRuntimeDir is unsupported on non-linux system. @@ -19,3 +21,18 @@ func GetRuntimeDir() (string, error) { func StickRuntimeDirContents(files []string) ([]string, error) { return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") } + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 9976f19a..45be8765 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -7,12 +7,16 @@ package homedir // NOTE: this package has originally been copied from github.com/docker/docker. import ( - "errors" + "fmt" "os" "path/filepath" + "strconv" "strings" + "sync" + "syscall" "github.com/containers/storage/pkg/unshare" + "github.com/sirupsen/logrus" ) // Key returns the env var name for the user's home dir based on @@ -40,18 +44,6 @@ func GetShortcutString() string { return "~" } -// GetRuntimeDir returns XDG_RUNTIME_DIR. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { - return filepath.EvalSymlinks(xdgRuntimeDir) - } - return "", errors.New("could not get XDG_RUNTIME_DIR") -} - // StickRuntimeDirContents sets the sticky bit on files that are under // XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. // @@ -94,3 +86,98 @@ func stick(f string) error { m |= os.ModeSticky return os.Chmod(f, m) } + +var ( + rootlessConfigHomeDirError error + rootlessConfigHomeDirOnce sync.Once + rootlessConfigHomeDir string + rootlessRuntimeDirOnce sync.Once + rootlessRuntimeDir string +) + +// isWriteableOnlyByOwner checks that the specified permission mask allows write +// access only to the owner. +func isWriteableOnlyByOwner(perm os.FileMode) bool { + return (perm & 0o722) == 0o700 +} + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. 
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + rootlessConfigHomeDirOnce.Do(func() { + cfgHomeDir := os.Getenv("XDG_CONFIG_HOME") + if cfgHomeDir == "" { + home := Get() + resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err) + return + } + tmpDir := filepath.Join(resolvedHome, ".config") + _ = os.MkdirAll(tmpDir, 0o700) + st, err := os.Stat(tmpDir) + if err != nil { + rootlessConfigHomeDirError = err + return + } else if int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() { + cfgHomeDir = tmpDir + } else { + rootlessConfigHomeDirError = fmt.Errorf("path %q exists and it is not owned by the current user", tmpDir) + return + } + } + rootlessConfigHomeDir = cfgHomeDir + }) + + return rootlessConfigHomeDir, rootlessConfigHomeDirError +} + +// GetRuntimeDir returns a directory suitable to store runtime files. +// The function will try to use the XDG_RUNTIME_DIR env variable if it is set. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable +// directory for the current user. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + var rootlessRuntimeDirError error + + rootlessRuntimeDirOnce.Do(func() { + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + + if runtimeDir != "" { + rootlessRuntimeDir, rootlessRuntimeDirError = filepath.EvalSymlinks(runtimeDir) + return + } + + uid := strconv.Itoa(unshare.GetRootlessUID()) + if runtimeDir == "" { + tmpDir := filepath.Join("/run", "user", uid) + if err := os.MkdirAll(tmpDir, 0o700); err != nil { + logrus.Debug(err) + } + st, err := os.Lstat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { + runtimeDir = tmpDir + } + } + if runtimeDir == "" { + tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("storage-run-%s", uid)) + if err := os.MkdirAll(tmpDir, 0o700); err != nil { + logrus.Debug(err) + } + st, err := os.Lstat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { + runtimeDir = tmpDir + } else { + rootlessRuntimeDirError = fmt.Errorf("path %q exists and it is not writeable only by the current user", tmpDir) + return + } + } + rootlessRuntimeDir = runtimeDir + }) + + return rootlessRuntimeDir, rootlessRuntimeDirError +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go index af65f2c0..a76610f9 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -5,6 +5,7 @@ package homedir import ( "os" + "path/filepath" ) // Key returns the env var name for the user's home dir based on @@ -25,8 +26,36 @@ func Get() string { return home } +// GetConfigHome returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func GetConfigHome() (string, error) { + return filepath.Join(Get(), ".config"), nil +} + // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. 
func GetShortcutString() string { return "%USERPROFILE%" // be careful while using in format functions } + +// StickRuntimeDirContents is a no-op on Windows +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, nil +} + +// GetRuntimeDir returns a directory suitable to store runtime files. +// The function will try to use the XDG_RUNTIME_DIR env variable if it is set. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable +// directory for the current user. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + data, err := GetDataHome() + if err != nil { + return "", err + } + runtimeDir := filepath.Join(data, "containers", "storage") + return runtimeDir, nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index 4701dc5a..d7cb4ac2 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -14,7 +14,7 @@ import ( "syscall" "github.com/containers/storage/pkg/system" - "github.com/opencontainers/runc/libcontainer/user" + "github.com/moby/sys/user" ) var ( diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go index 83de680c..e3160d0d 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go @@ -17,7 +17,7 @@ const ( // IsRootless tells us if we are running in rootless mode func IsRootless() bool { - return false + return os.Getuid() != 0 } // GetRootlessUID returns the UID of the user in the parent userNS diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index cb4525f2..924e8f13 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -59,7 +59,7 @@ additionalimagestores = [ # can deduplicate pulling of content, disk storage of content and can allow the # kernel to use less memory when running containers. -# containers/storage supports three keys +# containers/storage supports four keys # * enable_partial_images="true" | "false" # Tells containers/storage to look for files previously pulled in storage # rather than always pulling them from the container registry. @@ -70,7 +70,12 @@ additionalimagestores = [ # Tells containers/storage where an ostree repository exists that might have # previously pulled content which can be used when attempting to avoid # pulling content from the container registry -pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""} +# * convert_images = "false" | "true" +# If set to true, containers/storage will convert images to a +# format compatible with partial pulls in order to take advantage +# of local deduplication and hard linking. It is an expensive +# operation so it is not enabled by default.
+pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""} # Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of # a container, to the UIDs/GIDs as they should appear outside of the container, @@ -130,6 +135,9 @@ mountopt = "nodev" # Set to skip a PRIVATE bind mount on the storage home directory. # skip_mount_home = "false" +# Set to use composefs to mount data layers with overlay. +# use_composefs = "false" + # Size is used to set a maximum size of the container image. # size = "" diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd index c8abee64..03bbe246 100644 --- a/vendor/github.com/containers/storage/storage.conf-freebsd +++ b/vendor/github.com/containers/storage/storage.conf-freebsd @@ -96,6 +96,9 @@ mountopt = "nodev" # Set to skip a PRIVATE bind mount on the storage home directory. # skip_mount_home = "false" +# Set to use composefs to mount data layers with overlay. +# use_composefs = "false" + # Size is used to set a maximum size of the container image. # size = "" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 6753b296..c6f12518 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -1,6 +1,7 @@ package storage import ( + _ "embed" "encoding/base64" "errors" "fmt" @@ -10,6 +11,7 @@ import ( "reflect" "strings" "sync" + "syscall" "time" // register all of the built-in drivers @@ -69,6 +71,19 @@ type metadataStore interface { rwMetadataStore } +// ApplyStagedLayerOptions contains options to pass to ApplyStagedLayer +type ApplyStagedLayerOptions struct { + ID string // Mandatory + ParentLayer string // Optional + Names []string // Optional + MountLabel string // Optional + Writeable bool // Optional + LayerOptions *LayerOptions // Optional + + DiffOutput *drivers.DriverWithDifferOutput // Mandatory + DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory +} + // An roBigDataStore wraps up the read-only big-data related methods of the // various types of file-based lookaside stores that we implement. type roBigDataStore interface { @@ -313,14 +328,24 @@ type Store interface { // ApplyDiffer applies a diff to a layer. // It is the caller responsibility to clean the staging directory if it is not // successfully applied with ApplyDiffFromStagingDirectory. - ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) + ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff. - ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error + // Deprecated: it will be removed soon. Use ApplyStagedLayer instead. + ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors + // Deprecated: it will be removed soon. Use CleanupStagedLayer instead. 
CleanupStagingDirectory(stagingDirectory string) error + // ApplyStagedLayer combines the functions of CreateLayer and ApplyDiffFromStagingDirectory, + // marking the layer for automatic removal if applying the diff fails + // for any reason. + ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) + + // CleanupStagedLayer cleanups the staging directory. It can be used to cleanup the staging directory on errors + CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error + // DifferTarget gets the path to the differ target. DifferTarget(id string) (string, error) @@ -332,6 +357,10 @@ type Store interface { // specified uncompressed digest value recorded for them. LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) + // LayersByTOCDigest returns a slice of the layers with the + // specified TOC digest value recorded for them. + LayersByTOCDigest(d digest.Digest) ([]Layer, error) + // LayerSize returns a cached approximation of the layer's size, or -1 // if we don't have a value on hand. LayerSize(id string) (int64, error) @@ -391,6 +420,18 @@ type Store interface { // allow ImagesByDigest to find images by their correct digests. SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error + // ImageDirectory returns a path of a directory which the caller can + // use to store data, specific to the image, which the library does not + // directly manage. The directory will be deleted when the image is + // deleted. + ImageDirectory(id string) (string, error) + + // ImageRunDirectory returns a path of a directory which the caller can + // use to store data, specific to the image, which the library does not + // directly manage. The directory will be deleted when the host system + // is restarted. + ImageRunDirectory(id string) (string, error) + // ListLayerBigData retrieves a list of the (possibly large) chunks of // named data associated with a layer. ListLayerBigData(id string) ([]string, error) @@ -562,10 +603,19 @@ type LayerOptions struct { // initialize this layer. If set, it should be a child of the layer // which we want to use as the parent of the new layer. TemplateLayer string - // OriginalDigest specifies a digest of the tarstream (diff), if one is + // OriginalDigest specifies a digest of the (possibly-compressed) tarstream (diff), if one is // provided along with these LayerOptions, and reliably known by the caller. + // The digest might not be exactly the digest of the provided tarstream + // (e.g. the digest might be of a compressed representation, while providing + // an uncompressed one); in that case the caller is responsible for the two matching. // Use the default "" if this fields is not applicable or the value is not known. OriginalDigest digest.Digest + // OriginalSize specifies a size of the (possibly-compressed) tarstream corresponding + // to OriginalDigest. + // If the digest does not match the provided tarstream, OriginalSize must match OriginalDigest, + // not the tarstream. + // Use nil if not applicable or not known. + OriginalSize *int64 // UncompressedDigest specifies a digest of the uncompressed version (“DiffID”) // of the tarstream (diff), if one is provided along with these LayerOptions, // and reliably known by the caller. 
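The hunks above replace the two-step staging API (`ApplyDiffWithDiffer` followed by `ApplyDiffFromStagingDirectory`, with `CleanupStagingDirectory` on failure) with the combined `ApplyStagedLayer`/`CleanupStagedLayer` entry points; note that `ApplyDiffFromStagingDirectory` now also rejects any staging directory that differs from `diffOutput.Target`, which is part of why the combined call is preferred. A minimal sketch of the new flow, assuming a caller-supplied store, differ, and layer IDs (the empty `ApplyDiffWithDifferOpts` value is a placeholder, not a required setting):

```go
package main

import (
	"github.com/containers/storage"
	drivers "github.com/containers/storage/drivers" // package graphdriver
)

// applyLayerStaged stages a diff and turns it into a layer with the
// ApplyStagedLayer API shown above; on failure it releases the staging
// directory with CleanupStagedLayer.
func applyLayerStaged(s storage.Store, differ drivers.Differ, id, parent string) (*storage.Layer, error) {
	opts := &drivers.ApplyDiffWithDifferOpts{} // placeholder options

	// Stage the diff first; an empty "to" means the diff is not yet
	// bound to an existing layer.
	out, err := s.ApplyDiffWithDiffer("", opts, differ)
	if err != nil {
		return nil, err
	}

	layer, err := s.ApplyStagedLayer(storage.ApplyStagedLayerOptions{
		ID:          id,     // mandatory
		ParentLayer: parent, // optional
		DiffOutput:  out,    // mandatory
		DiffOptions: opts,   // mandatory
	})
	if err != nil {
		// The layer itself is marked for automatic removal when applying
		// the diff fails; the staging directory still has to be cleaned
		// up explicitly.
		_ = s.CleanupStagedLayer(out)
		return nil, err
	}
	return layer, nil
}
```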
@@ -922,11 +972,13 @@ func (s *store) load() error { if err := os.MkdirAll(gipath, 0o700); err != nil { return err } - ris, err := newImageStore(gipath) + imageStore, err := newImageStore(gipath) if err != nil { return err } - s.imageStore = ris + s.imageStore = imageStore + + s.rwImageStores = []rwImageStore{imageStore} gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") if err := os.MkdirAll(gcpath, 0o700); err != nil { @@ -944,13 +996,16 @@ func (s *store) load() error { s.containerStore = rcs - for _, store := range driver.AdditionalImageStores() { + additionalImageStores := s.graphDriver.AdditionalImageStores() + if s.imageStoreDir != "" { + additionalImageStores = append([]string{s.graphRoot}, additionalImageStores...) + } + + for _, store := range additionalImageStores { gipath := filepath.Join(store, driverPrefix+"images") var ris roImageStore - if s.imageStoreDir != "" && store == s.graphRoot { - // If --imagestore was set and current store - // is `graphRoot` then mount it as a `rw` additional - // store instead of `readonly` additional store. + // both the graphdriver and the imagestore must be used read-write. + if store == s.imageStoreDir || store == s.graphRoot { imageStore, err := newImageStore(gipath) if err != nil { return err @@ -960,6 +1015,10 @@ func (s *store) load() error { } else { ris, err = newROImageStore(gipath) if err != nil { + if errors.Is(err, syscall.EROFS) { + logrus.Debugf("Ignoring creation of lockfiles on read-only file systems %q, %v", gipath, err) + continue + } return err } } @@ -1031,15 +1090,9 @@ func (s *store) stopUsingGraphDriver() { // Almost all users should use startUsingGraphDriver instead. // The caller must hold s.graphLock. func (s *store) createGraphDriverLocked() (drivers.Driver, error) { - driverRoot := s.imageStoreDir - imageStoreBase := s.graphRoot - if driverRoot == "" { - driverRoot = s.graphRoot - imageStoreBase = "" - } config := drivers.Options{ - Root: driverRoot, - ImageStore: imageStoreBase, + Root: s.graphRoot, + ImageStore: s.imageStoreDir, RunRoot: s.runRoot, DriverPriority: s.graphDriverPriority, DriverOptions: s.graphOptions, @@ -1069,15 +1122,15 @@ func (s *store) getLayerStoreLocked() (rwLayerStore, error) { if err := os.MkdirAll(rlpath, 0o700); err != nil { return nil, err } - imgStoreRoot := s.imageStoreDir - if imgStoreRoot == "" { - imgStoreRoot = s.graphRoot - } - glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers") + glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") if err := os.MkdirAll(glpath, 0o700); err != nil { return nil, err } - rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore) + ilpath := "" + if s.imageStoreDir != "" { + ilpath = filepath.Join(s.imageStoreDir, driverPrefix+"layers") + } + rls, err := s.newLayerStore(rlpath, glpath, ilpath, s.graphDriver, s.transientStore) if err != nil { return nil, err } @@ -1108,8 +1161,10 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) { if err := os.MkdirAll(rlpath, 0o700); err != nil { return nil, err } + for _, store := range s.graphDriver.AdditionalImageStores() { glpath := filepath.Join(store, driverPrefix+"layers") + rls, err := newROLayerStore(rlpath, glpath, s.graphDriver) if err != nil { return nil, err @@ -1390,8 +1445,7 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { return true } -func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) { - var parentLayer 
*Layer +func (s *store) putLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) { rlstore, rlstores, err := s.bothLayerStoreKinds() if err != nil { return nil, -1, err @@ -1404,6 +1458,8 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w return nil, -1, err } defer s.containerStore.stopWriting() + + var parentLayer *Layer var options LayerOptions if lOptions != nil { options = *lOptions @@ -1463,6 +1519,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w } layerOptions := LayerOptions{ OriginalDigest: options.OriginalDigest, + OriginalSize: options.OriginalSize, UncompressedDigest: options.UncompressedDigest, Flags: options.Flags, } @@ -1476,7 +1533,11 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w GIDMap: copyIDMap(gidMap), } } - return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff) + return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo) +} + +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) { + return s.putLayer(id, parent, names, mountLabel, writeable, lOptions, diff, nil) } func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) { @@ -1686,7 +1747,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst } } layerOptions.TemplateLayer = layer.ID - mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil) + mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) if err != nil { return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err) } @@ -1857,7 +1918,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.Flags[mountLabelFlag] = mountLabel } - clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil) + clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil, nil) if err != nil { return nil, err } @@ -2530,7 +2591,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if err := s.writeToAllStores(func(rlstore rwLayerStore) error { // Delete image from all available imagestores configured to be used. imageFound := false - for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) 
{ + for _, is := range s.rwImageStores { if is != s.imageStore { // This is an additional writeable image store // so we must perform lock @@ -2741,7 +2802,13 @@ func (s *store) Status() ([][2]string, error) { return rlstore.Status() } +//go:embed VERSION +var storageVersion string + func (s *store) Version() ([][2]string, error) { + if trimmedVersion := strings.TrimSpace(storageVersion); trimmedVersion != "" { + return [][2]string{{"Version", trimmedVersion}}, nil + } return [][2]string{}, nil } @@ -2915,16 +2982,29 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro return nil, ErrLayerUnknown } -func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { +func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error { + if stagingDirectory != diffOutput.Target { + return fmt.Errorf("invalid value for staging directory, it must be the same as the differ target directory") + } _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { if !rlstore.Exists(to) { return struct{}{}, ErrLayerUnknown } - return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) + return struct{}{}, rlstore.applyDiffFromStagingDirectory(to, diffOutput, options) }) return err } +func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) { + slo := stagedLayerOptions{ + DiffOutput: args.DiffOutput, + DiffOptions: args.DiffOptions, + } + + layer, _, err := s.putLayer(args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo) + return layer, err +} + func (s *store) CleanupStagingDirectory(stagingDirectory string) error { _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory) @@ -2932,7 +3012,14 @@ func (s *store) CleanupStagingDirectory(stagingDirectory string) error { return err } -func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { +func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error { + _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { + return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target) + }) + return err +} + +func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) { if to != "" && !rlstore.Exists(to) { return nil, ErrLayerUnknown @@ -2994,6 +3081,13 @@ func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) } +func (s *store) LayersByTOCDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, fmt.Errorf("looking for TOC matching digest %q: %w", d, err) + } + return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByTOCDigest(d) }, d) +} + func (s *store) LayerSize(id string) (int64, error) { if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, 
error) { if store.Exists(id) { @@ -3288,6 +3382,27 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { return nil, ErrContainerUnknown } +func (s *store) ImageDirectory(id string) (string, error) { + foundImage := false + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { + if store.Exists(id) { + foundImage = true + } + middleDir := s.graphDriverName + "-images" + gipath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gipath, 0o700); err != nil { + return "", true, err + } + return gipath, true, nil + }); done { + return res, err + } + if foundImage { + return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist) + } + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + func (s *store) ContainerDirectory(id string) (string, error) { res, _, err := readContainerStore(s, func() (string, bool, error) { id, err := s.containerStore.Lookup(id) @@ -3305,6 +3420,28 @@ func (s *store) ContainerDirectory(id string) (string, error) { return res, err } +func (s *store) ImageRunDirectory(id string) (string, error) { + foundImage := false + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { + if store.Exists(id) { + foundImage = true + } + + middleDir := s.graphDriverName + "-images" + rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0o700); err != nil { + return "", true, err + } + return rcpath, true, nil + }); done { + return res, err + } + if foundImage { + return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist) + } + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + func (s *store) ContainerRunDirectory(id string) (string, error) { res, _, err := readContainerStore(s, func() (string, bool, error) { id, err := s.containerStore.Lookup(id) @@ -3545,8 +3682,8 @@ func SetDefaultConfigFilePath(path string) { } // DefaultConfigFile returns the path to the storage config file used -func DefaultConfigFile(rootless bool) (string, error) { - return types.DefaultConfigFile(rootless) +func DefaultConfigFile() (string, error) { + return types.DefaultConfigFile() } // ReloadConfigurationFile parses the specified configuration file and overrides diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index 5ae667a4..ad0bfa43 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -11,7 +11,9 @@ import ( "github.com/BurntSushi/toml" cfg "github.com/containers/storage/pkg/config" + "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" ) @@ -87,7 +89,7 @@ func loadDefaultStoreOptions() { _, err := os.Stat(defaultOverrideConfigFile) if err == nil { - // The DefaultConfigFile(rootless) function returns the path + // The DefaultConfigFile() function returns the path // of the used storage.conf file, by returning defaultConfigFile // If override exists containers/storage uses it by default. 
defaultConfigFile = defaultOverrideConfigFile @@ -109,21 +111,41 @@ func loadDefaultStoreOptions() { setDefaults() } -// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing. -// Everyone but the tests this is intended for should only call DefaultStoreOptions, never this function. -func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) { +// loadStoreOptions returns the default storage ops for containers +func loadStoreOptions() (StoreOptions, error) { + storageConf, err := DefaultConfigFile() + if err != nil { + return defaultStoreOptions, err + } + return loadStoreOptionsFromConfFile(storageConf) +} + +// usePerUserStorage returns whether the user private storage must be used. +// We cannot simply use the unshare.IsRootless() condition, because +// that checks only if the current process needs a user namespace to +// work and it would break cases where the process is already created +// in a user namespace (e.g. nested Podman/Buildah) and the desired +// behavior is to use system paths instead of user private paths. +func usePerUserStorage() bool { + return unshare.IsRootless() && unshare.GetRootlessUID() != 0 +} + +// loadStoreOptionsFromConfFile is an internal implementation detail of DefaultStoreOptions to allow testing. +// Everyone but the tests this is intended for should only call loadStoreOptions, never this function. +func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) { var ( defaultRootlessRunRoot string defaultRootlessGraphRoot string err error ) + defaultStoreOptionsOnce.Do(loadDefaultStoreOptions) if loadDefaultStoreOptionsErr != nil { return StoreOptions{}, loadDefaultStoreOptionsErr } storageOpts := defaultStoreOptions - if rootless && rootlessUID != 0 { - storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts) + if usePerUserStorage() { + storageOpts, err = getRootlessStorageOpts(storageOpts) if err != nil { return storageOpts, err } @@ -137,7 +159,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str defaultRootlessGraphRoot = storageOpts.GraphRoot storageOpts = StoreOptions{} reloadConfigurationFileIfNeeded(storageConf, &storageOpts) - if rootless && rootlessUID != 0 { + if usePerUserStorage() { // If the file did not specify a graphroot or runroot, // set sane defaults so we don't try and use root-owned // directories @@ -156,6 +178,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str if storageOpts.RunRoot == "" { return storageOpts, fmt.Errorf("runroot must be set") } + rootlessUID := unshare.GetRootlessUID() runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID) if err != nil { return storageOpts, err @@ -186,26 +209,17 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str return storageOpts, nil } -// loadStoreOptions returns the default storage ops for containers -func loadStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { - storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0) - if err != nil { - return defaultStoreOptions, err - } - return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf) -} - // UpdateOptions should be called iff container engine received a SIGHUP, // otherwise use DefaultStoreOptions -func UpdateStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { - storeOptions, storeError = loadStoreOptions(rootless, rootlessUID) +func UpdateStoreOptions() 
(StoreOptions, error) { + storeOptions, storeError = loadStoreOptions() return storeOptions, storeError } // DefaultStoreOptions returns the default storage ops for containers -func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { +func DefaultStoreOptions() (StoreOptions, error) { once.Do(func() { - storeOptions, storeError = loadStoreOptions(rootless, rootlessUID) + storeOptions, storeError = loadStoreOptions() }) return storeOptions, storeError } @@ -270,14 +284,26 @@ func isRootlessDriver(driver string) bool { } // getRootlessStorageOpts returns the storage opts for containers running as non root -func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { +func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) { var opts StoreOptions - dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) + rootlessUID := unshare.GetRootlessUID() + + dataDir, err := homedir.GetDataHome() + if err != nil { + return opts, err + } + + rootlessRuntime, err := homedir.GetRuntimeDir() if err != nil { return opts, err } - opts.RunRoot = rootlessRuntime + + opts.RunRoot = filepath.Join(rootlessRuntime, "containers") + if err := os.MkdirAll(opts.RunRoot, 0o700); err != nil { + return opts, fmt.Errorf("unable to make rootless runtime: %w", err) + } + opts.PullOptions = systemOpts.PullOptions if systemOpts.RootlessStoragePath != "" { opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID) @@ -343,12 +369,6 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti return opts, nil } -// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers -func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) { - uid := getRootlessUID() - return DefaultStoreOptions(uid != 0, uid) -} - var prevReloadConfig = struct { storeOptions *StoreOptions mod time.Time @@ -518,8 +538,8 @@ func Options() (StoreOptions, error) { } // Save overwrites the tomlConfig in storage.conf with the given conf -func Save(conf TomlConfig, rootless bool) error { - configFile, err := DefaultConfigFile(rootless) +func Save(conf TomlConfig) error { + configFile, err := DefaultConfigFile() if err != nil { return err } @@ -537,10 +557,10 @@ func Save(conf TomlConfig, rootless bool) error { } // StorageConfig is used to retrieve the storage.conf toml in order to overwrite it -func StorageConfig(rootless bool) (*TomlConfig, error) { +func StorageConfig() (*TomlConfig, error) { config := new(TomlConfig) - configFile, err := DefaultConfigFile(rootless) + configFile, err := DefaultConfigFile() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go index 73134f82..5b4b31b8 100644 --- a/vendor/github.com/containers/storage/types/utils.go +++ b/vendor/github.com/containers/storage/types/utils.go @@ -2,162 +2,15 @@ package types import ( "errors" - "fmt" "os" "path/filepath" "strconv" "strings" "github.com/containers/storage/pkg/homedir" - "github.com/containers/storage/pkg/system" "github.com/sirupsen/logrus" ) -// GetRootlessRuntimeDir returns the runtime directory when running as non root -func GetRootlessRuntimeDir(rootlessUID int) (string, error) { - path, err := getRootlessRuntimeDir(rootlessUID) - if err != nil { - return "", err - } - path = filepath.Join(path, "containers") - if err := os.MkdirAll(path, 0o700); err != nil { - return "", fmt.Errorf("unable to make rootless runtime: %w", err) 
- } - return path, nil -} - -type rootlessRuntimeDirEnvironment interface { - getProcCommandFile() string - getRunUserDir() string - getTmpPerUserDir() string - - homeDirGetRuntimeDir() (string, error) - systemLstat(string) (*system.StatT, error) - homedirGet() string -} - -type rootlessRuntimeDirEnvironmentImplementation struct { - procCommandFile string - runUserDir string - tmpPerUserDir string -} - -func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string { - return env.procCommandFile -} - -func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string { - return env.runUserDir -} - -func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string { - return env.tmpPerUserDir -} - -func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) { - return homedir.GetRuntimeDir() -} - -func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) { - return system.Lstat(path) -} - -func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string { - return homedir.Get() -} - -func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool { - st, err := env.systemLstat(dir) - return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0o700 == 0o700 && st.Mode()&0o066 == 0o000 -} - -// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing. -// Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function. -func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) { - runtimeDir, err := env.homeDirGetRuntimeDir() - if err == nil { - return runtimeDir, nil - } - - initCommand, err := os.ReadFile(env.getProcCommandFile()) - if err != nil || string(initCommand) == "systemd" { - runUserDir := env.getRunUserDir() - if isRootlessRuntimeDirOwner(runUserDir, env) { - return runUserDir, nil - } - } - - tmpPerUserDir := env.getTmpPerUserDir() - if tmpPerUserDir != "" { - if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) { - if err := os.Mkdir(tmpPerUserDir, 0o700); err != nil { - logrus.Errorf("Failed to create temp directory for user: %v", err) - } else { - return tmpPerUserDir, nil - } - } else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) { - return tmpPerUserDir, nil - } - } - - homeDir := env.homedirGet() - if homeDir == "" { - return "", errors.New("neither XDG_RUNTIME_DIR nor temp dir nor HOME was set non-empty") - } - resolvedHomeDir, err := filepath.EvalSymlinks(homeDir) - if err != nil { - return "", err - } - return filepath.Join(resolvedHomeDir, "rundir"), nil -} - -func getRootlessRuntimeDir(rootlessUID int) (string, error) { - return getRootlessRuntimeDirIsolated( - rootlessRuntimeDirEnvironmentImplementation{ - "/proc/1/comm", - fmt.Sprintf("/run/user/%d", rootlessUID), - fmt.Sprintf("%s/containers-user-%d", os.TempDir(), rootlessUID), - }, - ) -} - -// getRootlessDirInfo returns the parent path of where the storage for containers and -// volumes will be in rootless mode -func getRootlessDirInfo(rootlessUID int) (string, string, error) { - rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID) - if err != nil { - return "", "", err - } - - dataDir, err := homedir.GetDataHome() - if err == nil { - return dataDir, rootlessRuntime, nil - } - - home := homedir.Get() - if home == "" { - return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty: %w", err) - } - // runc doesn't like 
symlinks in the rootfs path, and at least - // on CoreOS /home is a symlink to /var/home, so resolve any symlink. - resolvedHome, err := filepath.EvalSymlinks(home) - if err != nil { - return "", "", err - } - dataDir = filepath.Join(resolvedHome, ".local", "share") - - return dataDir, rootlessRuntime, nil -} - -func getRootlessUID() int { - uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") - if uidEnv != "" { - u, _ := strconv.Atoi(uidEnv) - return u - } - return os.Geteuid() -} - func expandEnvPath(path string, rootlessUID int) (string, error) { var err error path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1) @@ -169,7 +22,7 @@ func expandEnvPath(path string, rootlessUID int) (string, error) { return newpath, nil } -func DefaultConfigFile(rootless bool) (string, error) { +func DefaultConfigFile() (string, error) { if defaultConfigFileSet { return defaultConfigFile, nil } @@ -177,7 +30,7 @@ func DefaultConfigFile(rootless bool) (string, error) { if path, ok := os.LookupEnv(storageConfEnv); ok { return path, nil } - if !rootless { + if !usePerUserStorage() { if _, err := os.Stat(defaultOverrideConfigFile); err == nil { return defaultOverrideConfigFile, nil } diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go index 32ae830b..57120731 100644 --- a/vendor/github.com/containers/storage/userns.go +++ b/vendor/github.com/containers/storage/userns.go @@ -11,7 +11,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" - libcontainerUser "github.com/opencontainers/runc/libcontainer/user" + libcontainerUser "github.com/moby/sys/user" "github.com/sirupsen/logrus" ) @@ -175,7 +175,7 @@ outer: // We need to create a temporary layer so we can mount it and lookup the // maximum IDs used. 
- clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil) + clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil, nil) if err != nil { return 0, err } diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 6b5a3421..5bade6ff 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -11,19 +11,9 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri return types.ParseIDMapping(UIDMapSlice, GIDMapSlice, subUIDMap, subGIDMap) } -// GetRootlessRuntimeDir returns the runtime directory when running as non root -func GetRootlessRuntimeDir(rootlessUID int) (string, error) { - return types.GetRootlessRuntimeDir(rootlessUID) -} - -// DefaultStoreOptionsAutoDetectUID returns the default storage options for containers -func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) { - return types.DefaultStoreOptionsAutoDetectUID() -} - // DefaultStoreOptions returns the default storage options for containers -func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) { - return types.DefaultStoreOptions(rootless, rootlessUID) +func DefaultStoreOptions() (types.StoreOptions, error) { + return types.DefaultStoreOptions() } func validateMountOptions(mountOptions []string) error { diff --git a/vendor/github.com/docker/distribution/reference/helpers_deprecated.go b/vendor/github.com/docker/distribution/reference/helpers_deprecated.go deleted file mode 100644 index cbd11925..00000000 --- a/vendor/github.com/docker/distribution/reference/helpers_deprecated.go +++ /dev/null @@ -1,34 +0,0 @@ -package reference - -import "github.com/distribution/reference" - -// IsNameOnly returns true if reference only contains a repo name. -// -// Deprecated: use [reference.IsNameOnly]. -func IsNameOnly(ref reference.Named) bool { - return reference.IsNameOnly(ref) -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -// -// Deprecated: use [reference.FamiliarName]. -func FamiliarName(ref reference.Named) string { - return reference.FamiliarName(ref) -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -// -// Deprecated: use [reference.FamiliarString]. -func FamiliarString(ref reference.Reference) string { - return reference.FamiliarString(ref) -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See [path.Match] for supported patterns. -// -// Deprecated: use [reference.FamiliarMatch]. -func FamiliarMatch(pattern string, ref reference.Reference) (bool, error) { - return reference.FamiliarMatch(pattern, ref) -} diff --git a/vendor/github.com/docker/distribution/reference/normalize_deprecated.go b/vendor/github.com/docker/distribution/reference/normalize_deprecated.go deleted file mode 100644 index 1b4a459d..00000000 --- a/vendor/github.com/docker/distribution/reference/normalize_deprecated.go +++ /dev/null @@ -1,92 +0,0 @@ -package reference - -import ( - "regexp" - - "github.com/distribution/reference" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/go-digest/digestset" -) - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. 
-// -// Deprecated: use [reference.ParseNormalizedNamed]. -func ParseNormalizedNamed(s string) (reference.Named, error) { - return reference.ParseNormalizedNamed(s) -} - -// ParseDockerRef normalizes the image reference following the docker convention, -// which allows for references to contain both a tag and a digest. -// -// Deprecated: use [reference.ParseDockerRef]. -func ParseDockerRef(ref string) (reference.Named, error) { - return reference.ParseDockerRef(ref) -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -// -// Deprecated: use [reference.TagNameOnly]. -func TagNameOnly(ref reference.Named) reference.Named { - return reference.TagNameOnly(ref) -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -// -// Deprecated: use [reference.ParseAnyReference]. -func ParseAnyReference(ref string) (reference.Reference, error) { - return reference.ParseAnyReference(ref) -} - -// Functions and types below have been removed in distribution v3 and -// have not been ported to github.com/distribution/reference. See -// https://github.com/distribution/distribution/pull/3774 - -var ( - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - // - // Deprecated: support for short-identifiers is deprecated, and will be removed in v3. - ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier) - - shortIdentifier = `([a-f0-9]{6,64})` - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = regexp.MustCompile(`^` + shortIdentifier + `$`) -) - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -// ParseAnyReferenceWithSet parses a reference string as a possible short -// identifier to be matched in a digest set, a full digest, or familiar name. -// -// Deprecated: support for short-identifiers is deprecated, and will be removed in v3. -func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return reference.ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/reference_deprecated.go b/vendor/github.com/docker/distribution/reference/reference_deprecated.go deleted file mode 100644 index 5b732498..00000000 --- a/vendor/github.com/docker/distribution/reference/reference_deprecated.go +++ /dev/null @@ -1,172 +0,0 @@ -// Package reference is deprecated, and has moved to github.com/distribution/reference. -// -// Deprecated: use github.com/distribution/reference instead. -package reference - -import ( - "github.com/distribution/reference" - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - // - // Deprecated: use [reference.NameTotalLengthMax]. 
- NameTotalLengthMax = reference.NameTotalLengthMax -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - // - // Deprecated: use [reference.ErrReferenceInvalidFormat]. - ErrReferenceInvalidFormat = reference.ErrReferenceInvalidFormat - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - // - // Deprecated: use [reference.ErrTagInvalidFormat]. - ErrTagInvalidFormat = reference.ErrTagInvalidFormat - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - // - // Deprecated: use [reference.ErrDigestInvalidFormat]. - ErrDigestInvalidFormat = reference.ErrDigestInvalidFormat - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - // - // Deprecated: use [reference.ErrNameContainsUppercase]. - ErrNameContainsUppercase = reference.ErrNameContainsUppercase - - // ErrNameEmpty is returned for empty, invalid repository names. - // - // Deprecated: use [reference.ErrNameEmpty]. - ErrNameEmpty = reference.ErrNameEmpty - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - // - // Deprecated: use [reference.ErrNameTooLong]. - ErrNameTooLong = reference.ErrNameTooLong - - // ErrNameNotCanonical is returned when a name is not canonical. - // - // Deprecated: use [reference.ErrNameNotCanonical]. - ErrNameNotCanonical = reference.ErrNameNotCanonical -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -// -// Deprecated: use [reference.Reference]. -type Reference = reference.Reference - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -// -// Deprecated: use [reference.Field]. -type Field = reference.Field - -// AsField wraps a reference in a Field for encoding. -// -// Deprecated: use [reference.AsField]. -func AsField(ref reference.Reference) reference.Field { - return reference.AsField(ref) -} - -// Named is an object with a full name -// -// Deprecated: use [reference.Named]. -type Named = reference.Named - -// Tagged is an object which has a tag -// -// Deprecated: use [reference.Tagged]. -type Tagged = reference.Tagged - -// NamedTagged is an object including a name and tag. -// -// Deprecated: use [reference.NamedTagged]. -type NamedTagged reference.NamedTagged - -// Digested is an object which has a digest -// in which it can be referenced by -// -// Deprecated: use [reference.Digested]. -type Digested reference.Digested - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -// -// Deprecated: use [reference.Canonical]. -type Canonical reference.Canonical - -// Domain returns the domain part of the [Named] reference. -// -// Deprecated: use [reference.Domain]. -func Domain(named reference.Named) string { - return reference.Domain(named) -} - -// Path returns the name without the domain part of the [Named] reference. -// -// Deprecated: use [reference.Path]. -func Path(named reference.Named) (name string) { - return reference.Path(named) -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// -// Deprecated: Use [reference.Domain] or [reference.Path]. 
-func SplitHostname(named reference.Named) (string, string) { - return reference.SplitHostname(named) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// -// Deprecated: use [reference.Parse]. -func Parse(s string) (reference.Reference, error) { - return reference.Parse(s) -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// -// Deprecated: use [reference.ParseNamed]. -func ParseNamed(s string) (reference.Named, error) { - return reference.ParseNamed(s) -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -// -// Deprecated: use [reference.WithName]. -func WithName(name string) (reference.Named, error) { - return reference.WithName(name) -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -// -// Deprecated: use [reference.WithTag]. -func WithTag(name reference.Named, tag string) (reference.NamedTagged, error) { - return reference.WithTag(name, tag) -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -// -// Deprecated: use [reference.WithDigest]. -func WithDigest(name reference.Named, digest digest.Digest) (reference.Canonical, error) { - return reference.WithDigest(name, digest) -} - -// TrimNamed removes any tag or digest from the named reference. -// -// Deprecated: use [reference.TrimNamed]. -func TrimNamed(ref reference.Named) reference.Named { - return reference.TrimNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/regexp_deprecated.go b/vendor/github.com/docker/distribution/reference/regexp_deprecated.go deleted file mode 100644 index 4b9c1b58..00000000 --- a/vendor/github.com/docker/distribution/reference/regexp_deprecated.go +++ /dev/null @@ -1,50 +0,0 @@ -package reference - -import ( - "github.com/distribution/reference" -) - -// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:"). -// -// Deprecated: use [reference.DigestRegexp]. -var DigestRegexp = reference.DigestRegexp - -// DomainRegexp matches hostname or IP-addresses, optionally including a port -// number. It defines the structure of potential domain components that may be -// part of image names. This is purposely a subset of what is allowed by DNS to -// ensure backwards compatibility with Docker image names. It may be a subset of -// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between -// square brackets (excluding zone identifiers as defined by [RFC 6874] or special -// addresses such as IPv4-Mapped). -// -// Deprecated: use [reference.DomainRegexp]. -// -// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874. -var DomainRegexp = reference.DigestRegexp - -// IdentifierRegexp is the format for string identifier used as a -// content addressable identifier using sha256. These identifiers -// are like digests without the algorithm, since sha256 is used. -// -// Deprecated: use [reference.IdentifierRegexp]. 
-var IdentifierRegexp = reference.IdentifierRegexp - -// NameRegexp is the format for the name component of references, including -// an optional domain and port, but without tag or digest suffix. -// -// Deprecated: use [reference.NameRegexp]. -var NameRegexp = reference.NameRegexp - -// ReferenceRegexp is the full supported format of a reference. The regexp -// is anchored and has capturing groups for name, tag, and digest -// components. -// -// Deprecated: use [reference.ReferenceRegexp]. -var ReferenceRegexp = reference.ReferenceRegexp - -// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go]. -// -// Deprecated: use [reference.TagRegexp]. -// -// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28 -var TagRegexp = reference.TagRegexp diff --git a/vendor/github.com/docker/distribution/reference/sort_deprecated.go b/vendor/github.com/docker/distribution/reference/sort_deprecated.go deleted file mode 100644 index a73251b6..00000000 --- a/vendor/github.com/docker/distribution/reference/sort_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package reference - -import "github.com/distribution/reference" - -// Sort sorts string references preferring higher information references. -// -// Deprecated: use [reference.Sort]. -func Sort(references []string) []string { - return reference.Sort(references) -} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go index 678153cf..7ca5ab72 100644 --- a/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -16,11 +16,9 @@ func isValidCredsMessage(msg string) error { if credentials.IsCredentialsMissingServerURLMessage(msg) { return credentials.NewErrCredentialsMissingServerURL() } - if credentials.IsCredentialsMissingUsernameMessage(msg) { return credentials.NewErrCredentialsMissingUsername() } - return nil } @@ -36,13 +34,10 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error { out, err := cmd.Output() if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { + if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { err = isValidErr } - - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) + return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) } return nil @@ -55,17 +50,15 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error out, err := cmd.Output() if err != nil { - t := strings.TrimSpace(string(out)) - - if credentials.IsErrCredentialsNotFoundMessage(t) { + if credentials.IsErrCredentialsNotFoundMessage(string(out)) { return nil, credentials.NewErrCredentialsNotFound() } - if isValidErr := isValidCredsMessage(t); isValidErr != nil { + if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { err = isValidErr } - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) } resp := &credentials.Credentials{ diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go index 8fa4d5d2..2283d5a4 100644 --- 
a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -1,6 +1,9 @@ package credentials -import "errors" +import ( + "errors" + "strings" +) const ( // ErrCredentialsNotFound standardizes the not found error, so every helper returns @@ -47,7 +50,7 @@ func IsErrCredentialsNotFound(err error) bool { // This function helps to check messages returned by an // external program via its standard output. func IsErrCredentialsNotFoundMessage(err string) bool { - return err == errCredentialsNotFoundMessage + return strings.TrimSpace(err) == errCredentialsNotFoundMessage } // errCredentialsMissingServerURL represents an error raised @@ -104,7 +107,7 @@ func IsCredentialsMissingServerURL(err error) bool { // IsCredentialsMissingServerURLMessage checks for an // errCredentialsMissingServerURL in the error message. func IsCredentialsMissingServerURLMessage(err string) bool { - return err == errCredentialsMissingServerURLMessage + return strings.TrimSpace(err) == errCredentialsMissingServerURLMessage } // IsCredentialsMissingUsername returns true if the error @@ -117,5 +120,5 @@ func IsCredentialsMissingUsername(err error) bool { // IsCredentialsMissingUsernameMessage checks for an // errCredentialsMissingUsername in the error message. func IsCredentialsMissingUsernameMessage(err string) bool { - return err == errCredentialsMissingUsernameMessage + return strings.TrimSpace(err) == errCredentialsMissingUsernameMessage } diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index b3141819..48d04f9a 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -27,6 +27,7 @@ Adam Miller Adam Mills Adam Pointer Adam Singer +Adam Thornton Adam Walz Adam Williams AdamKorcz @@ -173,6 +174,7 @@ Andy Rothfusz Andy Smith Andy Wilson Andy Zhang +Aneesh Kulkarni Anes Hasicic Angel Velazquez Anil Belur @@ -236,6 +238,7 @@ Ben Golub Ben Gould Ben Hall Ben Langfeld +Ben Lovy Ben Sargent Ben Severson Ben Toews @@ -262,7 +265,7 @@ Billy Ridgway Bily Zhang Bin Liu Bingshen Wang -Bjorn Neergaard +Bjorn Neergaard Blake Geno Boaz Shuster bobby abbott @@ -279,6 +282,7 @@ Brandon Liu Brandon Philips Brandon Rhodes Brendan Dixon +Brennan Kinney <5098581+polarathene@users.noreply.github.com> Brent Salisbury Brett Higgins Brett Kochendorfer @@ -363,6 +367,7 @@ chenyuzhu Chetan Birajdar Chewey Chia-liang Kao +Chiranjeevi Tirunagari chli Cholerae Hu Chris Alfonso @@ -433,8 +438,8 @@ Cristian Staretu cristiano balducci Cristina Yenyxe Gonzalez Garcia Cruceru Calin-Cristian +cui fliter CUI Wei -cuishuang Cuong Manh Le Cyprian Gracz Cyril F @@ -513,6 +518,7 @@ David Dooling David Gageot David Gebler David Glasser +David Karlsson <35727626+dvdksn@users.noreply.github.com> David Lawrence David Lechner David M. Karr @@ -602,6 +608,7 @@ Donald Huang Dong Chen Donghwa Kim Donovan Jones +Dorin Geman Doron Podoleanu Doug Davis Doug MacEachern @@ -636,6 +643,7 @@ Emily Rose Emir Ozer Eng Zer Jun Enguerran +Enrico Weigelt, metux IT consult Eohyung Lee epeterso er0k @@ -676,6 +684,7 @@ Evan Allrich Evan Carmi Evan Hazlett Evan Krall +Evan Lezar Evan Phoenix Evan Wies Evelyn Xu @@ -744,6 +753,7 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin +Frank Villaro-Dixon Frank Yang Fred Lifton Frederick F. 
Kautz IV @@ -983,6 +993,7 @@ Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido Jean-Christophe Berthon +Jean-Michel Rouet Jean-Paul Calderone Jean-Pierre Huynh Jean-Tiare Le Bigot @@ -1013,6 +1024,7 @@ Jeroen Jacobs Jesse Dearing Jesse Dubay Jessica Frazelle +Jeyanthinath Muthuram Jezeniel Zapanta Jhon Honce Ji.Zhilong @@ -1141,6 +1153,7 @@ junxu Jussi Nummelin Justas Brazauskas Justen Martin +Justin Chadwell Justin Cormack Justin Force Justin Keller <85903732+jk-vb@users.noreply.github.com> @@ -1183,6 +1196,7 @@ Ke Xu Kei Ohmura Keith Hudgins Keli Hu +Ken Bannister Ken Cochrane Ken Herner Ken ICHIKAWA @@ -1192,7 +1206,7 @@ Kenjiro Nakayama Kent Johnson Kenta Tada Kevin "qwazerty" Houdebert -Kevin Alvarez +Kevin Alvarez Kevin Burke Kevin Clark Kevin Feyrer @@ -1225,6 +1239,7 @@ Konstantin Gribov Konstantin L Konstantin Pelykh Kostadin Plachkov +kpcyrd Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister @@ -1306,6 +1321,7 @@ Lorenzo Fontana Lotus Fenn Louis Delossantos Louis Opter +Luboslav Pivarc Luca Favatella Luca Marturana Luca Orlandi @@ -1344,6 +1360,7 @@ Manuel Meurer Manuel Rüger Manuel Woelker mapk0y +Marat Radchenko Marc Abramowitz Marc Kuo Marc Tamsky @@ -1383,6 +1400,7 @@ Martijn van Oosterhout Martin Braun Martin Dojcak Martin Honermeyer +Martin Jirku Martin Kelly Martin Mosegaard Amdisen Martin Muzatko @@ -1461,6 +1479,7 @@ Michael Holzheu Michael Hudson-Doyle Michael Huettermann Michael Irwin +Michael Kebe Michael Kuehn Michael Käufl Michael Neale @@ -1509,10 +1528,11 @@ Mike Lundy Mike MacCana Mike Naberezny Mike Snitzer +Mike Sul mikelinjie <294893458@qq.com> Mikhail Sobolev Miklos Szegedi -Milas Bowman +Milas Bowman Milind Chawre Miloslav Trmač mingqing @@ -1524,6 +1544,7 @@ mlarcher Mohammad Banikazemi Mohammad Nasirifar Mohammed Aaqib Ansari +Mohd Sadiq Mohit Soni Moorthy RS Morgan Bauer @@ -1606,6 +1627,7 @@ Noah Treuhaft NobodyOnSE noducks Nolan Darilek +Nolan Miles Noriki Nakamura nponeccop Nurahmadie @@ -1661,6 +1683,7 @@ Paul Lietar Paul Liljenberg Paul Morie Paul Nasrat +Paul Seiffert Paul Weaver Paulo Gomes Paulo Ribeiro @@ -1674,6 +1697,7 @@ Pavlos Ratis Pavol Vargovcik Pawel Konczalski Paweł Gronowski +payall4u Peeyush Gupta Peggy Li Pei Su @@ -1703,7 +1727,9 @@ Phil Estes Phil Sphicas Phil Spitler Philip Alexander Etling +Philip K. Warren Philip Monroe +Philipp Fruck Philipp Gillé Philipp Wahala Philipp Weissensteiner @@ -1741,6 +1767,7 @@ Quentin Brossard Quentin Perez Quentin Tayssier r0n22 +Rachit Sharma Radostin Stoyanov Rafal Jeczalik Rafe Colton @@ -1773,6 +1800,7 @@ Rich Horwood Rich Moyse Rich Seymour Richard Burnison +Richard Hansen Richard Harvey Richard Mathie Richard Metzler @@ -1788,6 +1816,7 @@ Ritesh H Shukla Riyaz Faizullabhoy Rob Cowsill <42620235+rcowsill@users.noreply.github.com> Rob Gulewich +Rob Murray Rob Vesse Robert Bachmann Robert Bittle @@ -1869,6 +1898,7 @@ ryancooper7 RyanDeng Ryo Nakao Ryoga Saito +Régis Behmo Rémy Greinhofer s. rannou Sabin Basyal @@ -1885,6 +1915,7 @@ Sam J Sharpe Sam Neirinck Sam Reis Sam Rijs +Sam Thibault Sam Whited Sambuddha Basu Sami Wagiaalla @@ -1908,6 +1939,7 @@ Satoshi Tagomori Scott Bessler Scott Collier Scott Johnston +Scott Moser Scott Percival Scott Stamp Scott Walls @@ -1923,6 +1955,7 @@ Sebastiaan van Steenis Sebastiaan van Stijn Sebastian Höffner Sebastian Radloff +Sebastian Thomschke Sebastien Goasguen Senthil Kumar Selvaraj Senthil Kumaran @@ -1996,6 +2029,7 @@ Stanislav Bondarenko Stanislav Levin Steeve Morin Stefan Berger +Stefan Gehrig Stefan J. Wernli Stefan Praszalowicz Stefan S. 
@@ -2003,6 +2037,7 @@ Stefan Scherer Stefan Staudenmeyer Stefan Weil Steffen Butzer +Stephan Henningsen Stephan Spindler Stephen Benjamin Stephen Crosby @@ -2204,6 +2239,7 @@ Vinod Kulkarni Vishal Doshi Vishnu Kannan Vitaly Ostrosablin +Vitor Anjos Vitor Monteiro Vivek Agarwal Vivek Dasgupta @@ -2250,6 +2286,7 @@ Wenxuan Zhao Wenyu You <21551128@zju.edu.cn> Wenzhi Liang Wes Morgan +Wesley Pettit Wewang Xiaorenfine Wiktor Kwapisiewicz Will Dietz @@ -2289,7 +2326,7 @@ xiekeyang Ximo Guanter Gonzálbez xin.li Xinbo Weng -Xinfeng Liu +Xinfeng Liu Xinzi Zhou Xiuming Chen Xuecong Liao @@ -2355,6 +2392,7 @@ Zen Lin(Zhinan Lin) Zhang Kun Zhang Wei Zhang Wentao +zhangguanzhang ZhangHang zhangxianwei Zhenan Ye <21551168@zju.edu.cn> @@ -2381,6 +2419,7 @@ Zuhayr Elahi Zunayed Ali Álvaro Lázaro Átila Camurça Alves +吴小白 <296015668@qq.com> 尹吉峰 屈骏 徐俊杰 diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md index f136c343..381f1988 100644 --- a/vendor/github.com/docker/docker/api/README.md +++ b/vendor/github.com/docker/docker/api/README.md @@ -37,6 +37,6 @@ There is hopefully enough example material in the file for you to copy a similar When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. -Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. +Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index cba66bc4..37e553d4 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion = "1.43" + DefaultVersion = "1.44" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go deleted file mode 100644 index 19fc63d6..00000000 --- a/vendor/github.com/docker/docker/api/common_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !windows -// +build !windows - -package api // import "github.com/docker/docker/api" - -// MinVersion represents Minimum REST API version supported -const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go deleted file mode 100644 index 590ba547..00000000 --- a/vendor/github.com/docker/docker/api/common_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package api // import "github.com/docker/docker/api" - -// MinVersion represents Minimum REST API version supported -// Technically the first daemon API version released on Windows is v1.25 in -// engine version 1.13. However, some clients are explicitly using downlevel -// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. -// Hence also allowing 1.24 on Windows. 
-const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 7635b9f6..e55a76fc 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.43" +basePath: "/v1.44" info: title: "Docker Engine API" - version: "1.43" + version: "1.44" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.43) is used. - For example, calling `/info` is the same as calling `/v1.43/info`. Using the + If you omit the version-prefix, the current version of the API (v1.44) is used. + For example, calling `/info` is the same as calling `/v1.44/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -388,6 +388,16 @@ definitions: description: "Create mount point on host if missing" type: "boolean" default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to true in conjunction). + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false VolumeOptions: description: "Optional configuration for the `volume` type." type: "object" @@ -794,6 +804,12 @@ definitions: 1000000 (1 ms). 0 means inherit. type: "integer" format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" Health: description: | @@ -1297,7 +1313,10 @@ definitions: type: "boolean" x-nullable: true MacAddress: - description: "MAC address of the container." + description: | + MAC address of the container. + + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. type: "string" x-nullable: true OnBuild: @@ -1347,16 +1366,16 @@ definitions: EndpointsConfig: description: | A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" example: # putting an example here, instead of using the example values from - # /definitions/EndpointSettings, because containers/create currently - # does not support attaching to multiple networks, so the example request - # would be confusing if it showed that multiple networks can be contained - # in the EndpointsConfig. - # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. 
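With `DefaultVersion` bumped to 1.44 and the platform-specific `MinVersion` constants removed above, client code should no longer hard-code an API version. A minimal, editorial sketch (not part of the vendored diff) of the usual pattern with the standard `github.com/docker/docker/client` package:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// WithAPIVersionNegotiation lets the client downgrade from its default
	// (now 1.44) to whatever API version the daemon actually supports.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Negotiation normally happens lazily on the first request; trigger it
	// explicitly so the negotiated version can be inspected.
	cli.NegotiateAPIVersion(context.Background())
	fmt.Println("using API version:", cli.ClientVersion())
}
```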
EndpointsConfig: isolated_nw: IPAMConfig: @@ -1365,19 +1384,22 @@ definitions: LinkLocalIPs: - "169.254.34.68" - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" + database_nw: {} NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: - description: Name of the network's bridge (for example, `docker0`). + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. type: "string" example: "docker0" SandboxID: @@ -1387,34 +1409,40 @@ definitions: HairpinMode: description: | Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. type: "boolean" example: false LinkLocalIPv6Address: - description: IPv6 unicast address using the link-local prefix. + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. type: "string" - example: "fe80::42:acff:fe11:1" + example: "" LinkLocalIPv6PrefixLen: - description: Prefix length of the IPv6 unicast address. + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. type: "integer" - example: "64" + example: "" Ports: $ref: "#/definitions/PortMap" SandboxKey: - description: SandboxKey identifies the sandbox + description: SandboxKey is the full path of the netns handle type: "string" example: "/var/run/docker/netns/8ab54b426c38" - # TODO is SecondaryIPAddresses actually used? SecondaryIPAddresses: - description: "" + description: "Deprecated: This field is never set and will be removed in a future release." type: "array" items: $ref: "#/definitions/Address" x-nullable: true - # TODO is SecondaryIPv6Addresses actually used? SecondaryIPv6Addresses: - description: "" + description: "Deprecated: This field is never set and will be removed in a future release." type: "array" items: $ref: "#/definitions/Address" @@ -1723,10 +1751,15 @@ definitions: The ID of the container that was used to create the image. Depending on how the image was created, this field may be empty. + + **Deprecated**: this field is kept for backward compatibility, but + will be removed in API v1.45. type: "string" - x-nullable: false example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" ContainerConfig: + description: | + **Deprecated**: this field is kept for backward compatibility, but + will be removed in API v1.45. $ref: "#/definitions/ContainerConfig" DockerVersion: description: | @@ -1781,13 +1814,7 @@ definitions: description: | Total size of the image including all layers it is composed of. - In versions of Docker before v1.10, this field was calculated from - the image itself and all of its parent images. Images are now stored - self-contained, and no longer use a parent-chain, making this field - an equivalent of the Size field. - - > **Deprecated**: this field is kept for backward compatibility, but - > will be removed in API v1.44. + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. type: "integer" format: "int64" example: 1239828 @@ -1829,6 +1856,7 @@ definitions: x-nullable: true ImageSummary: type: "object" + x-go-name: "Summary" required: - Id - ParentId @@ -1925,12 +1953,7 @@ definitions: description: |- Total size of the image including all layers it is composed of. 
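The reworked `EndpointsConfig` example above captures two v1.44 changes: a MAC address is now requested per endpoint (instead of through the deprecated container-wide `Config.MacAddress`), and an empty endpoint configuration such as `database_nw: {}` is sufficient to attach to a network. A hedged sketch of the same request built with the vendored Go types:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	netConfig := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			"isolated_nw": {
				MacAddress: "02:42:ac:12:05:02", // per-endpoint MAC, new in v1.44
				Aliases:    []string{"server_x", "server_y"},
			},
			// An empty endpoint configuration is enough to simply connect.
			"database_nw": {},
		},
	}
	fmt.Println(len(netConfig.EndpointsConfig), "endpoints configured")
}
```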
- In versions of Docker before v1.10, this field was calculated from - the image itself and all of its parent images. Images are now stored - self-contained, and no longer use a parent-chain, making this field - an equivalent of the Size field. - - Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44. + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. type: "integer" format: "int64" example: 172064416 @@ -2448,6 +2471,11 @@ definitions: example: - "container_1" - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" Aliases: type: "array" items: @@ -2498,11 +2526,6 @@ definitions: type: "integer" format: "int64" example: 64 - MacAddress: - description: | - MAC address for the endpoint on this network. - type: "string" - example: "02:42:ac:11:00:04" DriverOpts: description: | DriverOpts is a mapping of driver options and values. These options @@ -2514,6 +2537,21 @@ definitions: example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] EndpointIPAMConfig: description: | @@ -3011,8 +3049,6 @@ definitions: Name: "journald" - Type: "Log" Name: "json-file" - - Type: "Log" - Name: "logentries" - Type: "Log" Name: "splunk" - Type: "Log" @@ -3547,6 +3583,32 @@ definitions: Level: type: "string" description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + TTY: description: "Whether a pseudo-TTY should be allocated." type: "boolean" @@ -3941,6 +4003,44 @@ definitions: - "remove" - "orphaned" + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." 
+ properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + $ref: "#/definitions/ContainerStatus" + PortStatus: + $ref: "#/definitions/PortStatus" + Task: type: "object" properties: @@ -3976,26 +4076,7 @@ definitions: AssignedGenericResources: $ref: "#/definitions/GenericResources" Status: - type: "object" - properties: - Timestamp: - type: "string" - format: "dateTime" - State: - $ref: "#/definitions/TaskState" - Message: - type: "string" - Err: - type: "string" - ContainerStatus: - type: "object" - properties: - ContainerID: - type: "string" - PID: - type: "integer" - ExitCode: - type: "integer" + $ref: "#/definitions/TaskStatus" DesiredState: $ref: "#/definitions/TaskState" JobIteration: @@ -4211,7 +4292,10 @@ definitions: - "stop-first" - "start-first" Networks: - description: "Specifies which networks the service should attach to." + description: | + Specifies which networks the service should attach to. + + Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" @@ -4445,6 +4529,7 @@ definitions: ImageDeleteResponseItem: type: "object" + x-go-name: "DeleteResponse" properties: Untagged: description: "The image ID of an image that was untagged" @@ -4453,6 +4538,29 @@ definitions: description: "The image ID of an image that was deleted" type: "string" + ServiceCreateResponse: + type: "object" + description: | + contains the information returned to a client on the + creation of a new service. + properties: + ID: + description: "The ID of the created service." + type: "string" + x-nullable: false + example: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warnings: + description: | + Optional warning message. + + FIXME(thaJeztah): this should have "omitempty" in the generated type. + type: "array" + x-nullable: true + items: + type: "string" + example: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + ServiceUpdateResponse: type: "object" properties: @@ -4462,7 +4570,8 @@ definitions: items: type: "string" example: - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + Warnings: + - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: type: "object" @@ -5296,7 +5405,25 @@ definitions: - "WARNING: No memory limit support" - "WARNING: bridge-nf-call-iptables is disabled" - "WARNING: bridge-nf-call-ip6tables is disabled" + CDISpecDirs: + description: | + List of directories where (Container Device Interface) CDI + specifications are located. + These specifications define vendor-specific modifications to an OCI + runtime specification for a container being created. + + An empty list indicates that CDI device injection is disabled. + + Note that since using CDI device injection requires the daemon to have + experimental enabled. For non-experimental daemons an empty list will + always be returned. + type: "array" + items: + type: "string" + example: + - "/etc/cdi" + - "/var/run/cdi" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. 
It is used by Info struct @@ -5334,7 +5461,7 @@ definitions: type: "array" items: type: "string" - example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] RegistryServiceConfig: @@ -5532,6 +5659,28 @@ definitions: items: type: "string" example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties may be provided by runtimes: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" Commit: description: | @@ -6416,6 +6565,7 @@ paths: Aliases: - "server_x" - "server_y" + database_nw: {} required: true responses: @@ -6563,7 +6713,7 @@ paths: StopSignal: "SIGTERM" StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" + Driver: "overlay2" ExecIDs: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" @@ -7994,6 +8144,7 @@ paths: - `label=key` or `label="key=value"` of an image label - `reference`=(`[:]`) - `since`=(`[:]`, `` or ``) + - `until=` type: "string" - name: "shared-size" in: "query" @@ -8176,6 +8327,16 @@ paths: description: "BuildKit output configuration" type: "string" default: "" + - name: "version" + in: "query" + type: "string" + default: "1" + enum: ["1", "2"] + description: | + Version of the builder backend to use. + + - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) + - `2` is [BuildKit](https://github.com/moby/buildkit) responses: 200: description: "no error" @@ -8245,7 +8406,7 @@ paths: /images/create: post: summary: "Create an image" - description: "Create an image by either pulling it from a registry or importing it." + description: "Pull or import an image." operationId: "ImageCreate" consumes: - "text/plain" @@ -8596,28 +8757,36 @@ paths: is_official: type: "boolean" is_automated: + description: | + Whether this repository has automated builds enabled. + +
+ + > **Deprecated**: This field is deprecated and will always + > be "false" in future. type: "boolean" + example: false name: type: "string" star_count: type: "integer" examples: application/json: - - description: "" - is_official: false + - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" + is_official: true is_automated: false - name: "wma55/u1210sshd" - star_count: 0 - - description: "" - is_official: false + name: "alpine" + star_count: 10093 + - description: "Busybox base image." + is_official: true is_automated: false - name: "jdswinbank/sshd" - star_count: 0 - - description: "" - is_official: false + name: "Busybox base image." + star_count: 3037 + - description: "The PostgreSQL object-relational database system provides reliability and data integrity." + is_official: true is_automated: false - name: "vgauthier/sshd" - star_count: 0 + name: "postgres" + star_count: 12408 500: description: "Server error" schema: @@ -8637,9 +8806,13 @@ paths: description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - `is-automated=(true|false)` + - `is-automated=(true|false)` (deprecated, see below) - `is-official=(true|false)` - `stars=` Matches images that has at least 'number' stars. + + The `is-automated` filter is deprecated. The `is_automated` field has + been deprecated by Docker Hub's search API. Consequently, searching + for `is-automated=true` will yield no results. type: "string" tags: ["Image"] /images/prune: @@ -9032,7 +9205,6 @@ paths: Created: 1466724217 Size: 1092588 SharedSize: 0 - VirtualSize: 1092588 Labels: {} Containers: 1 Containers: @@ -9895,6 +10067,10 @@ paths: example: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: description: | Forbidden operation. This happens when trying to create a network named after a pre-defined network, @@ -9924,13 +10100,7 @@ paths: type: "string" CheckDuplicate: description: | - Check for networks with duplicate names. Since Network is - primarily keyed based on a random ID and not on the name, and - network name is strictly a user-friendly alias to the network - which is uniquely identified using ID, there is no guaranteed - way to check for duplicates. CheckDuplicate is there to provide - a best effort checking of any networks which has the same name - but it is not guaranteed to catch all name collisions. + Deprecated: CheckDuplicate is now always enabled. type: "boolean" Driver: description: "Name of the network driver plugin to use." @@ -9998,14 +10168,19 @@ paths: /networks/{id}/connect: post: summary: "Connect a container to a network" + description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. 
A network cannot be re-attached to a running container" operationId: "NetworkConnect" consumes: - "application/json" responses: 200: description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: - description: "Operation not supported for swarm scoped networks" + description: "Operation forbidden" schema: $ref: "#/definitions/ErrorResponse" 404: @@ -10040,6 +10215,7 @@ paths: IPAMConfig: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" tags: ["Network"] /networks/{id}/disconnect: @@ -11033,18 +11209,7 @@ paths: 201: description: "no error" schema: - type: "object" - title: "ServiceCreateResponse" - properties: - ID: - description: "The ID of the created service." - type: "string" - Warning: - description: "Optional warning message" - type: "string" - example: - ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + $ref: "#/definitions/ServiceCreateResponse" 400: description: "bad parameter" schema: diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go deleted file mode 100644 index 9ee329a2..00000000 --- a/vendor/github.com/docker/docker/api/types/auth.go +++ /dev/null @@ -1,7 +0,0 @@ -package types // import "github.com/docker/docker/api/types" -import "github.com/docker/docker/api/types/registry" - -// AuthConfig contains authorization information for connecting to a Registry. -// -// Deprecated: use github.com/docker/docker/api/types/registry.AuthConfig -type AuthConfig = registry.AuthConfig diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/list.go b/vendor/github.com/docker/docker/api/types/checkpoint/list.go new file mode 100644 index 00000000..94a9c0a4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/checkpoint/list.go @@ -0,0 +1,7 @@ +package checkpoint + +// Summary represents the details of a checkpoint when listing endpoints. +type Summary struct { + // Name is the name of the checkpoint. + Name string +} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/options.go b/vendor/github.com/docker/docker/api/types/checkpoint/options.go new file mode 100644 index 00000000..9477458c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/checkpoint/options.go @@ -0,0 +1,19 @@ +package checkpoint + +// CreateOptions holds parameters to create a checkpoint from a container. +type CreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// ListOptions holds parameters to list checkpoints for a container. +type ListOptions struct { + CheckpointDir string +} + +// DeleteOptions holds parameters to delete a checkpoint from a container. 
+type DeleteOptions struct { + CheckpointID string + CheckpointDir string +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index d8cd3061..24b00a27 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -11,44 +11,6 @@ import ( units "github.com/docker/go-units" ) -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// CheckpointListOptions holds parameters to list checkpoints for a container -type CheckpointListOptions struct { - CheckpointDir string -} - -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container -type CheckpointDeleteOptions struct { - CheckpointID string - CheckpointDir string -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - // ContainerExecInspect holds information returned by exec inspect. type ContainerExecInspect struct { ExecID string `json:"ID"` @@ -58,42 +20,6 @@ type ContainerExecInspect struct { Pid int } -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. -type ContainerStartOptions struct { - CheckpointID string - CheckpointDir string -} - // CopyToContainerOptions holds information // about files to copy into a container type CopyToContainerOptions struct { @@ -307,14 +233,6 @@ type ImageSearchOptions struct { Limit int } -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height uint - Width uint -} - // NodeListOptions holds parameters to list nodes with. type NodeListOptions struct { Filters filters.Args @@ -340,15 +258,6 @@ type ServiceCreateOptions struct { QueryRegistry bool } -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string - // Warnings is a set of non-fatal warning messages to pass on to the user. 
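These option structs replace the `types.CheckpointCreateOptions`/`CheckpointListOptions`/`CheckpointDeleteOptions` trio deleted from `types/client.go` in the same hunk. A migration sketch; the client method signature and the container/checkpoint names are assumptions for illustration, not taken from this diff:

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types/checkpoint"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Previously types.CheckpointCreateOptions; now checkpoint.CreateOptions.
	// "mycontainer" and "checkpoint1" are placeholder names.
	err = cli.CheckpointCreate(context.Background(), "mycontainer", checkpoint.CreateOptions{
		CheckpointID: "checkpoint1",
		Exit:         false, // keep the container running after checkpointing
	})
	if err != nil {
		panic(err)
	}
}
```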
- Warnings []string `json:",omitempty"` -} - // Values for RegistryAuthFrom in ServiceUpdateOptions const ( RegistryAuthFromSpec = "spec" diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 7d5930bb..945b6efa 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -1,32 +1,5 @@ package types // import "github.com/docker/docker/api/types" -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - Platform *ocispec.Platform - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - // ExecConfig is a small subset of the Config struct that holds the configuration // for the exec feature of docker. type ExecConfig struct { @@ -43,25 +16,3 @@ type ExecConfig struct { WorkingDir string // Working directory Cmd []string // Execution commands and args } - -// PluginRmConfig holds arguments for plugin remove. -type PluginRmConfig struct { - ForceRemove bool -} - -// PluginEnableConfig holds arguments for plugin enable -type PluginEnableConfig struct { - Timeout int -} - -// PluginDisableConfig holds arguments for plugin disable. -type PluginDisableConfig struct { - ForceDisable bool -} - -// NetworkListConfig stores the options available for listing networks -type NetworkListConfig struct { - // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here - Detailed bool - Verbose bool -} diff --git a/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go b/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go deleted file mode 100644 index 6b4b4739..00000000 --- a/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go +++ /dev/null @@ -1,6 +0,0 @@ -package container - -// ContainerChangeResponseItem change item in response to ContainerChanges operation -// -// Deprecated: use [FilesystemChange]. -type ContainerChangeResponseItem = FilesystemChange diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index 077583e6..be41d631 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -5,6 +5,7 @@ import ( "time" "github.com/docker/docker/api/types/strslice" + dockerspec "github.com/docker/docker/image/spec/specs-go/v1" "github.com/docker/go-connections/nat" ) @@ -33,25 +34,7 @@ type StopOptions struct { } // HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. 
- // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} +type HealthConfig = dockerspec.HealthcheckConfig // ExecStartOptions holds the options to start container's exec. type ExecStartOptions struct { @@ -87,10 +70,13 @@ type Config struct { WorkingDir string // Current directory (PWD) in the command will be launched Entrypoint strslice.StrSlice // Entrypoint to run when starting the container NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT + // Mac Address of the container. + // + // Deprecated: this field is deprecated since API v1.44. Use EndpointSettings.MacAddress instead. 
+ MacAddress string `json:",omitempty"` + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT } diff --git a/vendor/github.com/docker/docker/api/types/container/errors.go b/vendor/github.com/docker/docker/api/types/container/errors.go new file mode 100644 index 00000000..32c97803 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/errors.go @@ -0,0 +1,9 @@ +package container + +type errInvalidParameter struct{ error } + +func (e *errInvalidParameter) InvalidParameter() {} + +func (e *errInvalidParameter) Unwrap() error { + return e.error +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go index d4e6f553..efb96266 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -1,10 +1,12 @@ package container // import "github.com/docker/docker/api/types/container" import ( + "fmt" "strings" "github.com/docker/docker/api/types/blkiodev" "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/strslice" "github.com/docker/go-connections/nat" units "github.com/docker/go-units" @@ -132,12 +134,12 @@ type NetworkMode string // IsNone indicates whether container isn't using a network stack. func (n NetworkMode) IsNone() bool { - return n == "none" + return n == network.NetworkNone } // IsDefault indicates whether container uses the default network stack. func (n NetworkMode) IsDefault() bool { - return n == "default" + return n == network.NetworkDefault } // IsPrivate indicates whether container uses its private network stack. @@ -271,33 +273,42 @@ type DeviceMapping struct { // RestartPolicy represents the restart policies of the container. type RestartPolicy struct { - Name string + Name RestartPolicyMode MaximumRetryCount int } +type RestartPolicyMode string + +const ( + RestartPolicyDisabled RestartPolicyMode = "no" + RestartPolicyAlways RestartPolicyMode = "always" + RestartPolicyOnFailure RestartPolicyMode = "on-failure" + RestartPolicyUnlessStopped RestartPolicyMode = "unless-stopped" +) + // IsNone indicates whether the container has the "no" restart policy. // This means the container will not automatically restart when exiting. func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" + return rp.Name == RestartPolicyDisabled || rp.Name == "" } // IsAlways indicates whether the container has the "always" restart policy. // This means the container will automatically restart regardless of the exit status. func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" + return rp.Name == RestartPolicyAlways } // IsOnFailure indicates whether the container has the "on-failure" restart policy. // This means the container will automatically restart if exiting with a non-zero exit status. func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" + return rp.Name == RestartPolicyOnFailure } // IsUnlessStopped indicates whether the container has the // "unless-stopped" restart policy.
This means the container will // automatically restart unless user has put it to stopped state. func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" + return rp.Name == RestartPolicyUnlessStopped } // IsSame compares two RestartPolicy to see if they are the same @@ -305,6 +316,33 @@ func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount } +// ValidateRestartPolicy validates the given RestartPolicy. +func ValidateRestartPolicy(policy RestartPolicy) error { + switch policy.Name { + case RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled: + if policy.MaximumRetryCount != 0 { + msg := "invalid restart policy: maximum retry count can only be used with 'on-failure'" + if policy.MaximumRetryCount < 0 { + msg += " and cannot be negative" + } + return &errInvalidParameter{fmt.Errorf(msg)} + } + return nil + case RestartPolicyOnFailure: + if policy.MaximumRetryCount < 0 { + return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")} + } + return nil + case "": + // Versions before v25.0.0 created an empty restart-policy "name" as + // default. Allow an empty name with "any" MaximumRetryCount for + // backward-compatibility. + return nil + default: + return &errInvalidParameter{fmt.Errorf("invalid restart policy: unknown policy '%s'; use one of '%s', '%s', '%s', or '%s'", policy.Name, RestartPolicyDisabled, RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyUnlessStopped)} + } +} + // LogMode is a type to define the available modes for logging // These modes affect how logs are handled when log messages start piling up. type LogMode string diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index 24c4fa8d..42132923 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -1,8 +1,9 @@ //go:build !windows -// +build !windows package container // import "github.com/docker/docker/api/types/container" +import "github.com/docker/docker/api/types/network" + // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { return i.IsDefault() @@ -11,15 +12,15 @@ func (i Isolation) IsValid() bool { // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsBridge() { - return "bridge" + return network.NetworkBridge } else if n.IsHost() { - return "host" + return network.NetworkHost } else if n.IsContainer() { return "container" } else if n.IsNone() { - return "none" + return network.NetworkNone } else if n.IsDefault() { - return "default" + return network.NetworkDefault } else if n.IsUserDefined() { return n.UserDefined() } @@ -28,12 +29,12 @@ func (n NetworkMode) NetworkName() string { // IsBridge indicates whether container uses the bridge network stack func (n NetworkMode) IsBridge() bool { - return n == "bridge" + return n == network.NetworkBridge } // IsHost indicates whether container uses the host network stack. 
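Restart policies are now a typed enum with an exported validator, so invalid combinations can be rejected before a request ever reaches the daemon. A short sketch using only the types added above:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Typed constants replace the bare "no"/"always"/"on-failure" strings.
	policy := container.RestartPolicy{
		Name:              container.RestartPolicyOnFailure,
		MaximumRetryCount: 3,
	}
	if err := container.ValidateRestartPolicy(policy); err != nil {
		panic(err)
	}

	// A retry count is only valid with "on-failure", so this fails fast
	// with an invalid-parameter error.
	bad := container.RestartPolicy{Name: container.RestartPolicyAlways, MaximumRetryCount: 1}
	fmt.Println(container.ValidateRestartPolicy(bad))
}
```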
func (n NetworkMode) IsHost() bool { - return n == "host" + return n == network.NetworkHost } // IsUserDefined indicates user-created network diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go index 99f803a5..154667f4 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -1,9 +1,11 @@ package container // import "github.com/docker/docker/api/types/container" +import "github.com/docker/docker/api/types/network" + // IsBridge indicates whether container uses the bridge network stack // in windows it is given the name NAT func (n NetworkMode) IsBridge() bool { - return n == "nat" + return n == network.NetworkNat } // IsHost indicates whether container uses the host network stack. @@ -25,11 +27,11 @@ func (i Isolation) IsValid() bool { // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsDefault() { - return "default" + return network.NetworkDefault } else if n.IsBridge() { - return "nat" + return network.NetworkNat } else if n.IsNone() { - return "none" + return network.NetworkNone } else if n.IsContainer() { return "container" } else if n.IsUserDefined() { diff --git a/vendor/github.com/docker/docker/api/types/container/options.go b/vendor/github.com/docker/docker/api/types/container/options.go new file mode 100644 index 00000000..7a230057 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/options.go @@ -0,0 +1,67 @@ +package container + +import "github.com/docker/docker/api/types/filters" + +// ResizeOptions holds parameters to resize a TTY. +// It can be used to resize container TTYs and +// exec process TTYs too. +type ResizeOptions struct { + Height uint + Width uint +} + +// AttachOptions holds parameters to attach to a container. +type AttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// CommitOptions holds parameters to commit changes into a container. +type CommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *Config +} + +// RemoveOptions holds parameters to remove containers. +type RemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// StartOptions holds parameters to start containers. +type StartOptions struct { + CheckpointID string + CheckpointDir string +} + +// ListOptions holds parameters to list containers with. +type ListOptions struct { + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// LogsOptions holds parameters to filter logs with. +type LogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index 9fe07e26..6dbcd922 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,7 +1,7 @@ package events // import "github.com/docker/docker/api/types/events" // Type is used for event-types. -type Type = string +type Type string // List of known event types. 
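The new `options.go` above is the destination for the `ResizeOptions` and `Container*Options` types deleted from `types/client.go` earlier in this diff; only the package and the dropped `Container` prefix change. A migration sketch, assuming the client methods in this release accept the relocated types:

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	ctx := context.Background()

	// types.ContainerListOptions -> container.ListOptions
	containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
	if err != nil {
		panic(err)
	}

	for _, c := range containers {
		// types.ContainerLogsOptions -> container.LogsOptions
		logs, err := cli.ContainerLogs(ctx, c.ID, container.LogsOptions{ShowStdout: true, Tail: "10"})
		if err != nil {
			continue
		}
		_, _ = io.Copy(os.Stdout, logs)
		logs.Close()
	}
}
```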
const ( @@ -18,6 +18,86 @@ VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. ) +// Action is used for event-actions. +type Action string + +const ( + ActionCreate Action = "create" + ActionStart Action = "start" + ActionRestart Action = "restart" + ActionStop Action = "stop" + ActionCheckpoint Action = "checkpoint" + ActionPause Action = "pause" + ActionUnPause Action = "unpause" + ActionAttach Action = "attach" + ActionDetach Action = "detach" + ActionResize Action = "resize" + ActionUpdate Action = "update" + ActionRename Action = "rename" + ActionKill Action = "kill" + ActionDie Action = "die" + ActionOOM Action = "oom" + ActionDestroy Action = "destroy" + ActionRemove Action = "remove" + ActionCommit Action = "commit" + ActionTop Action = "top" + ActionCopy Action = "copy" + ActionArchivePath Action = "archive-path" + ActionExtractToDir Action = "extract-to-dir" + ActionExport Action = "export" + ActionImport Action = "import" + ActionSave Action = "save" + ActionLoad Action = "load" + ActionTag Action = "tag" + ActionUnTag Action = "untag" + ActionPush Action = "push" + ActionPull Action = "pull" + ActionPrune Action = "prune" + ActionDelete Action = "delete" + ActionEnable Action = "enable" + ActionDisable Action = "disable" + ActionConnect Action = "connect" + ActionDisconnect Action = "disconnect" + ActionReload Action = "reload" + ActionMount Action = "mount" + ActionUnmount Action = "unmount" + + // ActionExecCreate is the prefix used for exec_create events. These + // event-actions are commonly followed by a colon and space (": "), + // and the command that's defined for the exec, for example: + // + // exec_create: /bin/sh -c 'echo hello' + + // This is far from ideal; it's a compromise to allow filtering and + // to preserve backward-compatibility. + ActionExecCreate Action = "exec_create" + // ActionExecStart is the prefix used for exec_start events. These + // event-actions are commonly followed by a colon and space (": "), + // and the command that's defined for the exec, for example: + // + // exec_start: /bin/sh -c 'echo hello' + + // This is far from ideal; it's a compromise to allow filtering and + // to preserve backward-compatibility. + ActionExecStart Action = "exec_start" + ActionExecDie Action = "exec_die" + ActionExecDetach Action = "exec_detach" + + // ActionHealthStatus is the prefix to use for health_status events. + // + // Health-status events can either have a pre-defined status, in which + // case the "health_status" action is followed by a colon, or can be + // "free-form", in which case they're followed by the + // health-check output. + // + // This is far from ideal, and a compromise to allow filtering, and + // to preserve backward-compatibility. + ActionHealthStatus Action = "health_status" + ActionHealthStatusRunning Action = "health_status: running" + ActionHealthStatusHealthy Action = "health_status: healthy" + ActionHealthStatusUnhealthy Action = "health_status: unhealthy" +) + // Actor describes something that generates events, // like a container, or a network, or a volume. // It has a defined name and a set of attributes. @@ -37,7 +117,7 @@ type Message struct { From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. Type Type - Action string + Action Action Actor Actor // Engine events are local scope. Cluster events are swarm scope.
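With `Type` and `Action` now distinct string types backed by exported constants, event consumers can match on well-known values instead of raw literals. An editorial sketch; it assumes `types.EventsOptions` still lives in the parent `types` package in this release:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	msgs, errs := cli.Events(context.Background(), types.EventsOptions{})
	for {
		select {
		case msg := <-msgs:
			// msg.Action is an events.Action, so the constants above can be
			// used directly in comparisons and switches.
			switch msg.Action {
			case events.ActionStart:
				fmt.Println("started:", msg.Actor.ID)
			case events.ActionDie:
				fmt.Println("died:", msg.Actor.ID)
			}
		case err := <-errs:
			panic(err)
		}
	}
}
```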
Scope string `json:"scope,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image/delete_response.go similarity index 68% rename from vendor/github.com/docker/docker/api/types/image_delete_response_item.go rename to vendor/github.com/docker/docker/api/types/image/delete_response.go index b9a65a0d..998620dc 100644 --- a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ b/vendor/github.com/docker/docker/api/types/image/delete_response.go @@ -1,11 +1,11 @@ -package types +package image // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { +// DeleteResponse delete response +// swagger:model DeleteResponse +type DeleteResponse struct { // The image ID of an image that was deleted Deleted string `json:"Deleted,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/image/image.go b/vendor/github.com/docker/docker/api/types/image/image.go new file mode 100644 index 00000000..167df28c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image.go @@ -0,0 +1,9 @@ +package image + +import "time" + +// Metadata contains engine-local data about the image. +type Metadata struct { + // LastTagTime is the date and time at which the image was last tagged. + LastTagTime time.Time `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go similarity index 85% rename from vendor/github.com/docker/docker/api/types/image_summary.go rename to vendor/github.com/docker/docker/api/types/image/summary.go index 0f6f1448..f1e3e2ef 100644 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ b/vendor/github.com/docker/docker/api/types/image/summary.go @@ -1,11 +1,11 @@ -package types +package image // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { +// Summary summary +// swagger:model Summary +type Summary struct { // Number of containers using this image. Includes both stopped and running // containers. @@ -84,11 +84,6 @@ type ImageSummary struct { // Total size of the image including all layers it is composed of. // - // In versions of Docker before v1.10, this field was calculated from - // the image itself and all of its parent images. Images are now stored - // self-contained, and no longer use a parent-chain, making this field - // an equivalent of the Size field. - // - // Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44. + // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. 
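`ImageDeleteResponseItem` and `ImageSummary` move into the dedicated `image` package as `DeleteResponse` and `Summary`, matching the `x-go-name` annotations added to `swagger.yaml` earlier in this diff. A sketch of caller code adapted to the rename; it assumes `ImageList` in this release returns the relocated type:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Previously []types.ImageSummary; now []image.Summary.
	images, err := cli.ImageList(context.Background(), types.ImageListOptions{})
	if err != nil {
		panic(err)
	}
	for _, img := range images {
		// Size replaces the VirtualSize field deprecated above.
		fmt.Printf("%s: %d bytes\n", img.ID, img.Size)
	}
}
```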
VirtualSize int64 `json:"VirtualSize,omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index ac4ce622..57edf2ef 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -29,7 +29,7 @@ type Mount struct { // Source is not supported for tmpfs (must be an empty value) Source string `json:",omitempty"` Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` // attempts recursive read-only if possible Consistency Consistency `json:",omitempty"` BindOptions *BindOptions `json:",omitempty"` @@ -85,6 +85,11 @@ type BindOptions struct { Propagation Propagation `json:",omitempty"` NonRecursive bool `json:",omitempty"` CreateMountpoint bool `json:",omitempty"` + // ReadOnlyNonRecursive makes the mount non-recursively read-only, but still leaves the mount recursive + // (unless NonRecursive is set to true in conjunction). + ReadOnlyNonRecursive bool `json:",omitempty"` + // ReadOnlyForceRecursive raises an error if the mount cannot be made recursively read-only. + ReadOnlyForceRecursive bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go new file mode 100644 index 00000000..9edd1c38 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/endpoint.go @@ -0,0 +1,147 @@ +package network + +import ( + "errors" + "fmt" + "net" + + "github.com/docker/docker/internal/multierror" +) + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string // Aliases holds the list of extra, user-specified DNS names for this endpoint. + // MacAddress may be used to specify a MAC address when the container is created. + // Once the container is running, it becomes operational data (it may contain a + // generated address). + MacAddress string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + DriverOpts map[string]string + // DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to + // generate PTR records. + DNSNames []string +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) 
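The `BindOptions` fields added above give callers explicit control over recursive read-only bind mounts; plain `ReadOnly` now attempts a recursive read-only mount where possible. A sketch using only the vendored mount types:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// A bind mount that is read-only at the top level but leaves any
	// submounts writable, via the new ReadOnlyNonRecursive option.
	m := mount.Mount{
		Type:     mount.TypeBind,
		Source:   "/srv/data", // placeholder host path
		Target:   "/data",
		ReadOnly: true,
		BindOptions: &mount.BindOptions{
			ReadOnlyNonRecursive: true,
			// Alternatively, ReadOnlyForceRecursive: true makes the daemon
			// error out if a recursive read-only mount cannot be guaranteed.
		},
	}
	fmt.Printf("%+v\n", m)
}
```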
+ } + + if len(es.DNSNames) > 0 { + epCopy.DNSNames = make([]string, len(es.DNSNames)) + copy(epCopy.DNSNames, es.DNSNames) + } + + return &epCopy +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// NetworkSubnet describes a user-defined subnet for a specific network. It's only used to validate if an +// EndpointIPAMConfig is valid for a specific network. +type NetworkSubnet interface { + // Contains checks whether the NetworkSubnet contains [addr]. + Contains(addr net.IP) bool + // IsStatic checks whether the subnet was statically allocated (ie. user-defined). + IsStatic() bool +} + +// IsInRange checks whether static IP addresses are valid in a specific network. +func (cfg *EndpointIPAMConfig) IsInRange(v4Subnets []NetworkSubnet, v6Subnets []NetworkSubnet) error { + var errs []error + + if err := validateEndpointIPAddress(cfg.IPv4Address, v4Subnets); err != nil { + errs = append(errs, err) + } + if err := validateEndpointIPAddress(cfg.IPv6Address, v6Subnets); err != nil { + errs = append(errs, err) + } + + return multierror.Join(errs...) +} + +func validateEndpointIPAddress(epAddr string, ipamSubnets []NetworkSubnet) error { + if epAddr == "" { + return nil + } + + var staticSubnet bool + parsedAddr := net.ParseIP(epAddr) + for _, subnet := range ipamSubnets { + if subnet.IsStatic() { + staticSubnet = true + if subnet.Contains(parsedAddr) { + return nil + } + } + } + + if staticSubnet { + return fmt.Errorf("no configured subnet or ip-range contain the IP address %s", epAddr) + } + + return errors.New("user specified IP address is supported only when connecting to networks with user configured subnets") +} + +// Validate checks whether cfg is valid. +func (cfg *EndpointIPAMConfig) Validate() error { + if cfg == nil { + return nil + } + + var errs []error + + if cfg.IPv4Address != "" { + if addr := net.ParseIP(cfg.IPv4Address); addr == nil || addr.To4() == nil || addr.IsUnspecified() { + errs = append(errs, fmt.Errorf("invalid IPv4 address: %s", cfg.IPv4Address)) + } + } + if cfg.IPv6Address != "" { + if addr := net.ParseIP(cfg.IPv6Address); addr == nil || addr.To4() != nil || addr.IsUnspecified() { + errs = append(errs, fmt.Errorf("invalid IPv6 address: %s", cfg.IPv6Address)) + } + } + for _, addr := range cfg.LinkLocalIPs { + if parsed := net.ParseIP(addr); parsed == nil || parsed.IsUnspecified() { + errs = append(errs, fmt.Errorf("invalid link-local IP address: %s", addr)) + } + } + + return multierror.Join(errs...) 
+} diff --git a/vendor/github.com/docker/docker/api/types/network/ipam.go b/vendor/github.com/docker/docker/api/types/network/ipam.go new file mode 100644 index 00000000..f319e140 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/ipam.go @@ -0,0 +1,134 @@ +package network + +import ( + "errors" + "fmt" + "net/netip" + + "github.com/docker/docker/internal/multierror" +) + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +type ipFamily string + +const ( + ip4 ipFamily = "IPv4" + ip6 ipFamily = "IPv6" +) + +// ValidateIPAM checks whether the network's IPAM passed as argument is valid. It returns a joinError of the list of +// errors found. +func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error { + if ipam == nil { + return nil + } + + var errs []error + for _, cfg := range ipam.Config { + subnet, err := netip.ParsePrefix(cfg.Subnet) + if err != nil { + errs = append(errs, fmt.Errorf("invalid subnet %s: invalid CIDR block notation", cfg.Subnet)) + continue + } + subnetFamily := ip4 + if subnet.Addr().Is6() { + subnetFamily = ip6 + } + + if !enableIPv6 && subnetFamily == ip6 { + continue + } + + if subnet != subnet.Masked() { + errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked())) + } + + if ipRangeErrs := validateIPRange(cfg.IPRange, subnet, subnetFamily); len(ipRangeErrs) > 0 { + errs = append(errs, ipRangeErrs...) + } + + if err := validateAddress(cfg.Gateway, subnet, subnetFamily); err != nil { + errs = append(errs, fmt.Errorf("invalid gateway %s: %w", cfg.Gateway, err)) + } + + for auxName, aux := range cfg.AuxAddress { + if err := validateAddress(aux, subnet, subnetFamily); err != nil { + errs = append(errs, fmt.Errorf("invalid auxiliary address %s: %w", auxName, err)) + } + } + } + + if err := multierror.Join(errs...); err != nil { + return fmt.Errorf("invalid network config:\n%w", err) + } + + return nil +} + +func validateIPRange(ipRange string, subnet netip.Prefix, subnetFamily ipFamily) []error { + if ipRange == "" { + return nil + } + prefix, err := netip.ParsePrefix(ipRange) + if err != nil { + return []error{fmt.Errorf("invalid ip-range %s: invalid CIDR block notation", ipRange)} + } + family := ip4 + if prefix.Addr().Is6() { + family = ip6 + } + + if family != subnetFamily { + return []error{fmt.Errorf("invalid ip-range %s: parent subnet is an %s block", ipRange, subnetFamily)} + } + + var errs []error + if prefix.Bits() < subnet.Bits() { + errs = append(errs, fmt.Errorf("invalid ip-range %s: CIDR block is bigger than its parent subnet %s", ipRange, subnet)) + } + if prefix != prefix.Masked() { + errs = append(errs, fmt.Errorf("invalid ip-range %s: it should be %s", prefix, prefix.Masked())) + } + if !subnet.Overlaps(prefix) { + errs = append(errs, fmt.Errorf("invalid ip-range %s: parent subnet %s doesn't contain ip-range", ipRange, subnet)) + } + + return errs +} + +func validateAddress(address string, subnet netip.Prefix, subnetFamily ipFamily) error { + if address == "" { + return nil + } + addr, err := netip.ParseAddr(address) + if err != nil { + return errors.New("invalid address") + } + family := ip4 + if addr.Is6() { + family = ip6 + } + + if 
family != subnetFamily { + return fmt.Errorf("parent subnet is an %s block", subnetFamily) + } + if !subnet.Contains(addr) { + return fmt.Errorf("parent subnet %s doesn't contain this address", subnet) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index 437b184c..f1f300f3 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -1,69 +1,34 @@ package network // import "github.com/docker/docker/api/types/network" + import ( "github.com/docker/docker/api/types/filters" ) +const ( + // NetworkDefault is a platform-independent alias to choose the platform-specific default network stack. + NetworkDefault = "default" + // NetworkHost is the name of the predefined network used when the NetworkMode host is selected (only available on Linux) + NetworkHost = "host" + // NetworkNone is the name of the predefined network used when the NetworkMode none is selected (available on both Linux and Windows) + NetworkNone = "none" + // NetworkBridge is the name of the default network on Linux + NetworkBridge = "bridge" + // NetworkNat is the name of the default network on Windows + NetworkNat = "nat" +) + // Address represents an IP address type Address struct { Addr string PrefixLen int } -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string // Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) - return &cfgCopy -} - // PeerInfo represents one peer of an overlay network type PeerInfo struct { Name string IP string } -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string - DriverOpts map[string]string -} - // Task carries the information about one backend task type Task struct { Name string @@ -80,25 +45,6 @@ type ServiceInfo struct { Tasks []Task } -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) 
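With the IPAM types split into `ipam.go` and the well-known network names promoted to constants, both can be exercised directly from the vendored package. A small sketch (the subnet values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	// The new constants replace scattered "bridge"/"host"/"none" literals.
	fmt.Println(network.NetworkBridge, network.NetworkHost, network.NetworkNone)

	// ValidateIPAM rejects malformed configurations, e.g. subnets that are
	// not in canonical masked form or gateways outside the subnet.
	ipam := &network.IPAM{
		Config: []network.IPAMConfig{{
			Subnet:  "172.28.0.0/16",
			IPRange: "172.28.5.0/24",
			Gateway: "172.28.0.1",
		}},
	}
	if err := network.ValidateIPAM(ipam, false); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("ipam config ok")
}
```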
- } - return &epCopy -} - // NetworkingConfig represents the container's networking configuration for each of its interfaces // Carries the networking configs specified in the `docker run` and `docker network connect` commands type NetworkingConfig struct { diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index b83f5d7b..05cb3107 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -92,7 +92,9 @@ type SearchResult struct { IsOfficial bool `json:"is_official"` // Name is the name of the repository Name string `json:"name"` - // IsAutomated indicates whether the result is automated + // IsAutomated indicates whether the result is automated. + // + // Deprecated: the "is_automated" field is deprecated and will always be "false" in the future. IsAutomated bool `json:"is_automated"` // Description is a textual description of the repository Description string `json:"description"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index af5e1c0b..65f61d2d 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -32,6 +32,42 @@ type SELinuxContext struct { Level string } +// SeccompMode is the type used for the enumeration of possible seccomp modes +// in SeccompOpts +type SeccompMode string + +const ( + SeccompModeDefault SeccompMode = "default" + SeccompModeUnconfined SeccompMode = "unconfined" + SeccompModeCustom SeccompMode = "custom" +) + +// SeccompOpts defines the options for configuring seccomp on a swarm-managed +// container. +type SeccompOpts struct { + // Mode is the SeccompMode used for the container. + Mode SeccompMode `json:",omitempty"` + // Profile is the custom seccomp profile as a json object to be used with + // the container. Mode should be set to SeccompModeCustom when using a + // custom profile in this manner. + Profile []byte `json:",omitempty"` +} + +// AppArmorMode is type used for the enumeration of possible AppArmor modes in +// AppArmorOpts +type AppArmorMode string + +const ( + AppArmorModeDefault AppArmorMode = "default" + AppArmorModeDisabled AppArmorMode = "disabled" +) + +// AppArmorOpts defines the options for configuring AppArmor on a swarm-managed +// container. Currently, custom AppArmor profiles are not supported. +type AppArmorOpts struct { + Mode AppArmorMode `json:",omitempty"` +} + // CredentialSpec for managed service account (Windows only) type CredentialSpec struct { Config string @@ -41,8 +77,11 @@ type CredentialSpec struct { // Privileges defines the security options for the container. type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext + Seccomp *SeccompOpts `json:",omitempty"` + AppArmor *AppArmorOpts `json:",omitempty"` + NoNewPrivileges bool } // ContainerSpec represents the spec of a container. diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go index 98c2806c..292bd7af 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -1,3 +1,3 @@ -//go:generate protoc -I . 
--gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto +//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go index e4504586..32aaf0d5 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -1,23 +1,15 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: plugin.proto -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ package runtime -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -28,22 +20,50 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // PluginSpec defines the base payload which clients can specify for creating // a service with the plugin runtime. type PluginSpec struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"` Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_22a625af4bc1cc87, []int{0} +} +func (m *PluginSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PluginSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PluginSpec.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PluginSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginSpec.Merge(m, src) +} +func (m *PluginSpec) XXX_Size() int { + return m.Size() +} +func (m *PluginSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PluginSpec.DiscardUnknown(m) } -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } +var xxx_messageInfo_PluginSpec 
proto.InternalMessageInfo func (m *PluginSpec) GetName() string { if m != nil { @@ -85,13 +105,41 @@ func (m *PluginSpec) GetEnv() []string { type PluginPrivilege struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value,proto3" json:"value,omitempty"` } -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { + return fileDescriptor_22a625af4bc1cc87, []int{1} +} +func (m *PluginPrivilege) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PluginPrivilege) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PluginPrivilege.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PluginPrivilege) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginPrivilege.Merge(m, src) +} +func (m *PluginPrivilege) XXX_Size() int { + return m.Size() +} +func (m *PluginPrivilege) XXX_DiscardUnknown() { + xxx_messageInfo_PluginPrivilege.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginPrivilege proto.InternalMessageInfo func (m *PluginPrivilege) GetName() string { if m != nil { @@ -118,10 +166,32 @@ func init() { proto.RegisterType((*PluginSpec)(nil), "PluginSpec") proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") } + +func init() { proto.RegisterFile("plugin.proto", fileDescriptor_22a625af4bc1cc87) } + +var fileDescriptor_22a625af4bc1cc87 = []byte{ + // 225 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x9a, 0xc1, 0xc8, 0xc5, 0x15, 0x00, 0x16, + 0x08, 0x2e, 0x48, 0x4d, 0x16, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, + 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0x52, 0x73, 0xf3, 0x4b, 0x52, 0x25, + 0x98, 0xc0, 0xa2, 0x50, 0x9e, 0x90, 0x01, 0x17, 0x57, 0x41, 0x51, 0x66, 0x59, 0x66, 0x4e, 0x6a, + 0x7a, 0x6a, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x80, 0x1e, 0xc4, 0xb0, 0x00, 0x98, + 0x44, 0x10, 0x92, 0x1a, 0x21, 0x29, 0x2e, 0x8e, 0x94, 0xcc, 0xe2, 0xc4, 0xa4, 0x9c, 0xd4, 0x14, + 0x09, 0x16, 0x05, 0x46, 0x0d, 0x8e, 0x20, 0x38, 0x5f, 0x48, 0x80, 0x8b, 0x39, 0x35, 0xaf, 0x4c, + 0x82, 0x55, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc4, 0x54, 0x8a, 0xe5, 0xe2, 0x47, 0x33, 0x0c, 0xab, + 0xf3, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x6e, 0x44, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x91, 0x33, + 0x08, 0xc2, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, + 0x70, 0xd0, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 
0x37, 0xea, 0xe2, 0xca, 0x2a, 0x01, 0x00, + 0x00, +} + func (m *PluginSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -129,66 +199,69 @@ func (m *PluginSpec) Marshal() (dAtA []byte, err error) { } func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PluginSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Env[iNdEx]) + copy(dAtA[i:], m.Env[iNdEx]) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Env[iNdEx]))) + i-- + dAtA[i] = 0x2a } } if m.Disabled { - dAtA[i] = 0x20 - i++ + i-- if m.Disabled { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x20 } - if len(m.Env) > 0 { - for _, s := range m.Env { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Privileges) > 0 { + for iNdEx := len(m.Privileges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Privileges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlugin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - return i, nil + if len(m.Remote) > 0 { + i -= len(m.Remote) + copy(dAtA[i:], m.Remote) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -196,50 +269,56 @@ func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { } func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PluginPrivilege) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if len(m.Value) > 0 { + for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Value[iNdEx]) + copy(dAtA[i:], m.Value[iNdEx]) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) + i-- + dAtA[i] = 0x12 } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - 
for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + offset -= sovPlugin(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PluginSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -269,6 +348,9 @@ func (m *PluginSpec) Size() (n int) { } func (m *PluginPrivilege) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -289,14 +371,7 @@ func (m *PluginPrivilege) Size() (n int) { } func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozPlugin(x uint64) (n int) { return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -316,7 +391,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -344,7 +419,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -354,6 +429,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -373,7 +451,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -383,6 +461,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -402,7 +483,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -411,6 +492,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -433,7 +517,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -453,7 +537,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -463,6 +547,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -474,7 +561,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPlugin } if (iNdEx + skippy) > l { @@ -504,7 +591,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -532,7 +619,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -542,6 +629,9 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -561,7 +651,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -571,6 +661,9 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -590,7 +683,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -600,6 +693,9 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -611,7 +707,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPlugin } if (iNdEx + skippy) > l { @@ -629,6 +725,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { func skipPlugin(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -660,10 +757,8 @@ func skipPlugin(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -680,75 +775,34 @@ func skipPlugin(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthPlugin } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPlugin + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthPlugin + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer 
overflow") + ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") ) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, - 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, - 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, - 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, - 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, - 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, - 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, - 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, - 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, - 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, - 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, - 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, - 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, - 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, - 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, - 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto index 9ef16904..e311b36b 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -1,7 +1,5 @@ syntax = "proto3"; -option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; - // PluginSpec defines the base payload which clients can specify for creating // a service with the plugin runtime. message PluginSpec { diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go index 6eb452d2..5b6d5ec1 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -34,9 +34,9 @@ type ServiceSpec struct { UpdateConfig *UpdateConfig `json:",omitempty"` RollbackConfig *UpdateConfig `json:",omitempty"` - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. + // Networks specifies which networks the service should attach to. + // + // Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. 
Networks []NetworkAttachmentConfig `json:",omitempty"` EndpointSpec *EndpointSpec `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go b/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go new file mode 100644 index 00000000..9a268ff1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go @@ -0,0 +1,20 @@ +package swarm + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceCreateResponse contains the information returned to a client on the +// creation of a new service. +// +// swagger:model ServiceCreateResponse +type ServiceCreateResponse struct { + + // The ID of the created service. + ID string `json:"ID,omitempty"` + + // Optional warning message. + // + // FIXME(thaJeztah): this should have "omitempty" in the generated type. + // + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go similarity index 95% rename from vendor/github.com/docker/docker/api/types/service_update_response.go rename to vendor/github.com/docker/docker/api/types/swarm/service_update_response.go index 74ea64b1..0417467d 100644 --- a/vendor/github.com/docker/docker/api/types/service_update_response.go +++ b/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go @@ -1,4 +1,4 @@ -package types +package swarm // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go new file mode 100644 index 00000000..89d4a009 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -0,0 +1,116 @@ +package system + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" +) + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. 
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + CgroupVersion string `json:",omitempty"` + NEventsListener int + KernelVersion string + OperatingSystem string + OSVersion string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + Runtimes map[string]RuntimeWithStatus + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + CDISpecDirs []string + + // Legacy API fields for older API versions. + legacyFields + + // Warnings contains a slice of warnings that occurred while collecting + // system information. These warnings are intended to be informational + // messages for the user, and are not intended to be parsed / used for + // other purposes, as they do not have a fixed format. + Warnings []string +} + +type legacyFields struct { + ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions. +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by [Info] struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. +} + +// NetworkAddressPool is a temp struct used by [Info] struct. +type NetworkAddressPool struct { + Base string + Size int +} diff --git a/vendor/github.com/docker/docker/api/types/system/runtime.go b/vendor/github.com/docker/docker/api/types/system/runtime.go new file mode 100644 index 00000000..d077295a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/system/runtime.go @@ -0,0 +1,20 @@ +package system + +// Runtime describes an OCI runtime +type Runtime struct { + // "Legacy" runtime configuration for runc-compatible runtimes. + + Path string `json:"path,omitempty"` + Args []string `json:"runtimeArgs,omitempty"` + + // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. 
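A short sketch of what the two mutually exclusive shapes look like in practice (the runtime names, binary path, and shim type are illustrative):

```go
package main

import "github.com/docker/docker/api/types/system"

func main() {
	runtimes := map[string]system.Runtime{
		// "Legacy" runc-compatible runtime: binary path plus arguments.
		"custom-runc": {Path: "/usr/local/bin/runc", Args: []string{"--debug"}},
		// Shim v2 runtime: identified by type, with driver-specific options.
		"kata": {Type: "io.containerd.kata.v2"},
	}
	_ = runtimes
}
```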
+ + Type string `json:"runtimeType,omitempty"` + Options map[string]interface{} `json:"options,omitempty"` +} + +// RuntimeWithStatus extends [Runtime] to hold [RuntimeStatus]. +type RuntimeWithStatus struct { + Runtime + Status map[string]string `json:"status,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/system/security_opts.go b/vendor/github.com/docker/docker/api/types/system/security_opts.go new file mode 100644 index 00000000..edff3eb1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/system/security_opts.go @@ -0,0 +1,48 @@ +package system + +import ( + "errors" + "fmt" + "strings" +) + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a +// type-safe [SecurityOpt]. +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + for _, s := range strings.Split(opt, ",") { + k, v, ok := strings.Cut(s, "=") + if !ok { + return nil, fmt.Errorf("invalid security option %q", s) + } + if k == "" || v == "" { + return nil, errors.New("invalid empty security option") + } + if k == "name" { + secopt.Name = v + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) + } + so = append(so, secopt) + } + return so, nil +} + +// KeyValue holds a key/value pair. +type KeyValue struct { + Key, Value string +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index b413e020..5c56a0ca 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -1,18 +1,15 @@ package types // import "github.com/docker/docker/api/types" import ( - "errors" - "fmt" "io" "os" - "strings" "time" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/volume" "github.com/docker/go-connections/nat" @@ -80,6 +77,8 @@ type ImageInspect struct { // Container is the ID of the container that was used to create the image. // // Depending on how the image was created, this field may be empty. + // + // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. Container string // ContainerConfig is an optional field containing the configuration of the @@ -87,6 +86,8 @@ type ImageInspect struct { // // Previous versions of Docker builder used this field to store build cache, // and it is not in active use anymore. + // + // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. ContainerConfig *container.Config // DockerVersion is the version of Docker that was used to build the image. @@ -118,12 +119,7 @@ type ImageInspect struct { // VirtualSize is the total size of the image including all layers it is // composed of. // - // In versions of Docker before v1.10, this field was calculated from - // the image itself and all of its parent images. 
Docker v1.10 and up - // store images self-contained, and no longer use a parent-chain, making - // this field an equivalent of the Size field. - // - // Deprecated: Unused in API 1.43 and up, but kept for backward compatibility with older API versions. + // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. VirtualSize int64 `json:"VirtualSize,omitempty"` // GraphDriver holds information about the storage driver used to store the @@ -137,13 +133,7 @@ type ImageInspect struct { // Metadata of the image in the local cache. // // This information is local to the daemon, and not part of the image itself. - Metadata ImageMetadata -} - -// ImageMetadata contains engine-local data about the image -type ImageMetadata struct { - // LastTagTime is the date and time at which the image was last tagged. - LastTagTime time.Time `json:",omitempty"` + Metadata image.Metadata } // Container contains response of Engine API: @@ -237,148 +227,6 @@ type Version struct { BuildTime string `json:",omitempty"` } -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. -} - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - PidsLimit bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - CgroupVersion string `json:",omitempty"` - NEventsListener int - KernelVersion string - OperatingSystem string - OSVersion string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - GenericResources []swarm.GenericResource - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string - ProductLicense string `json:",omitempty"` - DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - - // Warnings contains a slice of warnings that occurred while collecting - // system information. 
These warnings are intended to be informational - // messages for the user, and are not intended to be parsed / used for - // other purposes, as they do not have a fixed format. - Warnings []string -} - -// KeyValue holds a key/value pair -type KeyValue struct { - Key, Value string -} - -// NetworkAddressPool is a temp struct used by Info struct -type NetworkAddressPool struct { - Base string - Size int -} - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// SecurityOpt -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - for _, s := range strings.Split(opt, ",") { - k, v, ok := strings.Cut(s, "=") - if !ok { - return nil, fmt.Errorf("invalid security option %q", s) - } - if k == "" || v == "" { - return nil, errors.New("invalid empty security option") - } - if k == "name" { - secopt.Name = v - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) - } - so = append(so, secopt) - } - return so, nil -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string - // List of Log plugins registered - Log []string -} - // ExecStartCheck is a temp struct used by execStart // Config fields is part of ExecConfig in runconfig package type ExecStartCheck struct { @@ -491,17 +339,27 @@ type SummaryNetworkSettings struct { Networks map[string]*network.EndpointSettings } -// NetworkSettingsBase holds basic information about networks +// NetworkSettingsBase holds networking state for a container when inspecting it. type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address + Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. + SandboxID string // SandboxID uniquely represents a container's network stack + SandboxKey string // SandboxKey identifies the sandbox + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + + // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + // + // Deprecated: This field is never set and will be removed in a future release. + HairpinMode bool + // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + // + // Deprecated: This field is never set and will be removed in a future release. 
+ LinkLocalIPv6Address string + // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + // + // Deprecated: This field is never set and will be removed in a future release. + LinkLocalIPv6PrefixLen int + SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release. + SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. } // DefaultNetworkSettings holds network information @@ -594,14 +452,9 @@ type EndpointResource struct { // NetworkCreate is the expected body of the "create network" http request message type NetworkCreate struct { - // Check for networks with duplicate names. - // Network is primarily keyed based on a random ID and not on the name. - // Network name is strictly a user-friendly alias to the network - // which is uniquely identified using ID. - // And there is no guaranteed way to check for duplicates. - // Option CheckDuplicate is there to provide a best effort checking of any networks - // which has the same name but it is not guaranteed to catch all name collisions. - CheckDuplicate bool + // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client + // package to older daemons. + CheckDuplicate bool `json:",omitempty"` Driver string Scope string EnableIPv6 bool @@ -645,33 +498,6 @@ type NetworkInspectOptions struct { Verbose bool } -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - // "Legacy" runtime configuration for runc-compatible runtimes. - - Path string `json:"path,omitempty"` - Args []string `json:"runtimeArgs,omitempty"` - - // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. - - Type string `json:"runtimeType,omitempty"` - Options map[string]interface{} `json:"options,omitempty"` - - // This is exposed here only for internal use - ShimConfig *ShimConfig `json:"-"` -} - -// ShimConfig is used by runtime to configure containerd shims -type ShimConfig struct { - Binary string - Opts interface{} -} - // DiskUsageObject represents an object type used for disk usage query filtering. type DiskUsageObject string @@ -697,7 +523,7 @@ type DiskUsageOptions struct { // GET "/system/df" type DiskUsage struct { LayersSize int64 - Images []*ImageSummary + Images []*image.Summary Containers []*Container Volumes []*volume.Volume BuildCache []*BuildCache @@ -721,7 +547,7 @@ type VolumesPruneReport struct { // ImagesPruneReport contains the response for Engine API: // POST "/images/prune" type ImagesPruneReport struct { - ImagesDeleted []ImageDeleteResponseItem + ImagesDeleted []image.DeleteResponse SpaceReclaimed uint64 } diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go new file mode 100644 index 00000000..e332a7bb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -0,0 +1,138 @@ +package types + +import ( + "github.com/docker/docker/api/types/checkpoint" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/system" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container. +// +// Deprecated: use [checkpoint.CreateOptions]. 
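Because the replacements below are type aliases, old code keeps compiling unchanged; new code should import the dedicated packages directly. A sketch against the new location (container and checkpoint IDs are illustrative; `CheckpointCreate`'s updated signature appears later in this diff):

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types/checkpoint"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Previously types.CheckpointCreateOptions; the alias keeps old callers building.
	err = cli.CheckpointCreate(context.Background(), "my-container", checkpoint.CreateOptions{
		CheckpointID: "cp1",
	})
	if err != nil {
		panic(err)
	}
}
```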
+type CheckpointCreateOptions = checkpoint.CreateOptions + +// CheckpointListOptions holds parameters to list checkpoints for a container +// +// Deprecated: use [checkpoint.ListOptions]. +type CheckpointListOptions = checkpoint.ListOptions + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +// +// Deprecated: use [checkpoint.DeleteOptions]. +type CheckpointDeleteOptions = checkpoint.DeleteOptions + +// Checkpoint represents the details of a checkpoint when listing endpoints. +// +// Deprecated: use [checkpoint.Summary]. +type Checkpoint = checkpoint.Summary + +// Info contains response of Engine API: +// GET "/info" +// +// Deprecated: use [system.Info]. +type Info = system.Info + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +// +// Deprecated: use [system.Commit]. +type Commit = system.Commit + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by [system.Info] struct +// +// Deprecated: use [system.PluginsInfo]. +type PluginsInfo = system.PluginsInfo + +// NetworkAddressPool is a temp struct used by [system.Info] struct. +// +// Deprecated: use [system.NetworkAddressPool]. +type NetworkAddressPool = system.NetworkAddressPool + +// Runtime describes an OCI runtime. +// +// Deprecated: use [system.Runtime]. +type Runtime = system.Runtime + +// SecurityOpt contains the name and options of a security option. +// +// Deprecated: use [system.SecurityOpt]. +type SecurityOpt = system.SecurityOpt + +// KeyValue holds a key/value pair. +// +// Deprecated: use [system.KeyValue]. +type KeyValue = system.KeyValue + +// ImageDeleteResponseItem image delete response item. +// +// Deprecated: use [image.DeleteResponse]. +type ImageDeleteResponseItem = image.DeleteResponse + +// ImageSummary image summary. +// +// Deprecated: use [image.Summary]. +type ImageSummary = image.Summary + +// ImageMetadata contains engine-local data about the image. +// +// Deprecated: use [image.Metadata]. +type ImageMetadata = image.Metadata + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +// +// Deprecated: use [swarm.ServiceCreateResponse]. +type ServiceCreateResponse = swarm.ServiceCreateResponse + +// ServiceUpdateResponse service update response. +// +// Deprecated: use [swarm.ServiceUpdateResponse]. +type ServiceUpdateResponse = swarm.ServiceUpdateResponse + +// ContainerStartOptions holds parameters to start containers. +// +// Deprecated: use [container.StartOptions]. +type ContainerStartOptions = container.StartOptions + +// ResizeOptions holds parameters to resize a TTY. +// It can be used to resize container TTYs and +// exec process TTYs too. +// +// Deprecated: use [container.ResizeOptions]. +type ResizeOptions = container.ResizeOptions + +// ContainerAttachOptions holds parameters to attach to a container. +// +// Deprecated: use [container.AttachOptions]. +type ContainerAttachOptions = container.AttachOptions + +// ContainerCommitOptions holds parameters to commit changes into a container. +// +// Deprecated: use [container.CommitOptions]. +type ContainerCommitOptions = container.CommitOptions + +// ContainerListOptions holds parameters to list containers with. +// +// Deprecated: use [container.ListOptions]. +type ContainerListOptions = container.ListOptions + +// ContainerLogsOptions holds parameters to filter logs with. 
+// +// Deprecated: use [container.LogsOptions]. +type ContainerLogsOptions = container.LogsOptions + +// ContainerRemoveOptions holds parameters to remove containers. +// +// Deprecated: use [container.RemoveOptions]. +type ContainerRemoveOptions = container.RemoveOptions + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// [system.SecurityOpt]. +// +// Deprecated: use [system.DecodeSecurityOptions]. +func DecodeSecurityOptions(opts []string) ([]system.SecurityOpt, error) { + return system.DecodeSecurityOptions(opts) +} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md index 992f1811..f8af3ab9 100644 --- a/vendor/github.com/docker/docker/client/README.md +++ b/vendor/github.com/docker/docker/client/README.md @@ -1,8 +1,10 @@ # Go client for the Docker Engine API -The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. +The `docker` command uses this package to communicate with the daemon. It can +also be used by your own Go applications to do anything the command-line +interface does – running containers, pulling images, managing swarms, etc. -For example, to list running containers (the equivalent of `docker ps`): +For example, to list all containers (the equivalent of `docker ps --all`): ```go package main @@ -11,25 +13,26 @@ import ( "context" "fmt" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" ) func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) + apiClient, err := client.NewClientWithOpts(client.FromEnv) if err != nil { panic(err) } + defer apiClient.Close() - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + containers, err := apiClient.ContainerList(context.Background(), container.ListOptions{All: true}) if err != nil { panic(err) } - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) + for _, ctr := range containers { + fmt.Printf("%s %s (status: %s)\n", ctr.ID, ctr.Image, ctr.Status) } } ``` -[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) +[Full documentation is available on pkg.go.dev.](https://pkg.go.dev/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go index 2b660623..1a830f41 100644 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -13,7 +13,7 @@ import ( // BuildCachePrune requests the daemon to delete unused cache data func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { - if err := cli.NewVersionError("1.31", "build prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go index 921024fe..9746d288 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -3,11 +3,11 @@ package client // import "github.com/docker/docker/client" import ( "context" - 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) // CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error { resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go index 54f55fa7..b968c2b2 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -4,11 +4,11 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) // CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error { query := url.Values{} if options.CheckpointDir != "" { query.Set("dir", options.CheckpointDir) diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 39cfb959..8feb1f3f 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -5,12 +5,12 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) // CheckpointList returns the checkpoints of the given container in the docker host -func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { - var checkpoints []types.Checkpoint +func (cli *Client) CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) { + var checkpoints []checkpoint.Summary query := url.Values{} if options.CheckpointDir != "" { diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 54fa36cc..0b496b0f 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -19,7 +19,7 @@ For example, to list running containers (the equivalent of "docker ps"): "context" "fmt" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" ) @@ -29,13 +29,13 @@ For example, to list running containers (the equivalent of "docker ps"): panic(err) } - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + containers, err := cli.ContainerList(context.Background(), container.ListOptions{}) if err != nil { panic(err) } - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) + for _, ctr := range containers { + fmt.Printf("%s %s\n", ctr.ID, ctr.Image) } } */ @@ -43,17 +43,21 @@ package client // import "github.com/docker/docker/client" import ( "context" + "crypto/tls" "net" "net/http" "net/url" "path" "strings" + "time" "github.com/docker/docker/api" "github.com/docker/docker/api/types" 
"github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" "github.com/pkg/errors" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel/trace" ) // DummyHost is a hostname used for local communication. @@ -86,8 +90,12 @@ import ( // [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569 const DummyHost = "api.moby.localhost" -// ErrRedirect is the error returned by checkRedirect when the request is non-GET. -var ErrRedirect = errors.New("unexpected redirect in response") +// fallbackAPIVersion is the version to fallback to if API-version negotiation +// fails. This version is the highest version of the API before API-version +// negotiation was introduced. If negotiation fails (or no API version was +// included in the API response), we assume the API server uses the most +// recent version before negotiation was introduced. +const fallbackAPIVersion = "1.24" // Client is the API client that performs all operations // against a docker server. @@ -106,7 +114,12 @@ type Client struct { client *http.Client // version of the server to talk to. version string - // custom http headers configured by users. + // userAgent is the User-Agent header to use for HTTP requests. It takes + // precedence over User-Agent headers set in customHTTPHeaders, and other + // header variables. When set to an empty string, the User-Agent header + // is removed, and no header is sent. + userAgent *string + // custom HTTP headers configured by users. customHTTPHeaders map[string]string // manualOverride is set to true when the version was set by users. manualOverride bool @@ -119,22 +132,33 @@ type Client struct { // negotiated indicates that API version negotiation took place negotiated bool + + tp trace.TracerProvider + + // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections). + // Store the original transport as the http.Client transport will be wrapped with tracing libs. + baseTransport *http.Transport } -// CheckRedirect specifies the policy for dealing with redirect responses: -// If the request is non-GET return ErrRedirect, otherwise use the last response. +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// CheckRedirect specifies the policy for dealing with redirect responses. It +// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for +// non-GET requests. It returns an [ErrRedirect] for non-GET request, otherwise +// returns a [http.ErrUseLastResponse], which is special-cased by http.Client +// to use the last response. // -// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) -// in the client. The Docker client (and by extension docker API client) can be -// made to send a request like POST /containers//start where what would normally -// be in the name section of the URL is empty. This triggers an HTTP 301 from -// the daemon. +// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308) +// in the client. The client (and by extension API client) can be made to send +// a request like "POST /containers//start" where what would normally be in the +// name section of the URL is empty. This triggers an HTTP 301 from the daemon. 
// -// In go 1.8 this 301 will be converted to a GET request, and ends up getting +// In go 1.8 this 301 is converted to a GET request, and ends up getting // a 404 from the daemon. This behavior change manifests in the client in that // before, the 301 was not followed and the client did not generate an error, -// but now results in a message like Error response from daemon: page not found. -func CheckRedirect(req *http.Request, via []*http.Request) error { +// but now results in a message like "Error response from daemon: page not found". +func CheckRedirect(_ *http.Request, via []*http.Request) error { if via[0].Method == http.MethodGet { return http.ErrUseLastResponse } @@ -145,11 +169,11 @@ func CheckRedirect(req *http.Request, via []*http.Request) error { // default API host and version. It also initializes the custom HTTP headers to // add to each request. // -// It takes an optional list of Opt functional arguments, which are applied in +// It takes an optional list of [Opt] functional arguments, which are applied in // the order they're provided, which allows modifying the defaults when creating // the client. For example, the following initializes a client that configures -// itself with values from environment variables (client.FromEnv), and has -// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()). +// itself with values from environment variables ([FromEnv]), and has automatic +// API version negotiation enabled ([WithAPIVersionNegotiation]). // // cli, err := client.NewClientWithOpts( // client.FromEnv, @@ -179,23 +203,43 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { } } + if tr, ok := c.client.Transport.(*http.Transport); ok { + // Store the base transport before we wrap it in tracing libs below + // This is used, as an example, to close idle connections when the client is closed + c.baseTransport = tr + } + if c.scheme == "" { - c.scheme = "http" - - tlsConfig := resolveTLSConfig(c.client.Transport) - if tlsConfig != nil { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. 
+ if c.tlsConfig() != nil { c.scheme = "https" + } else { + c.scheme = "http" } } + c.client.Transport = otelhttp.NewTransport( + c.client.Transport, + otelhttp.WithTracerProvider(c.tp), + otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { + return req.Method + " " + req.URL.Path + }), + ) + return c, nil } +func (cli *Client) tlsConfig() *tls.Config { + if cli.baseTransport == nil { + return nil + } + return cli.baseTransport.TLSClientConfig +} + func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { transport := &http.Transport{} err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) @@ -210,19 +254,28 @@ func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { // Close the transport used by the client func (cli *Client) Close() error { - if t, ok := cli.client.Transport.(*http.Transport); ok { - t.CloseIdleConnections() + if cli.baseTransport != nil { + cli.baseTransport.CloseIdleConnections() + return nil } return nil } -// getAPIPath returns the versioned request path to call the api. -// It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { - var apiPath string +// checkVersion manually triggers API version negotiation (if configured). +// This allows for version-dependent code to use the same version as will +// be negotiated when making the actual requests, and for which cases +// we cannot do the negotiation lazily. +func (cli *Client) checkVersion(ctx context.Context) { if cli.negotiateVersion && !cli.negotiated { cli.NegotiateAPIVersion(ctx) } +} + +// getAPIPath returns the versioned request path to call the API. +// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { + var apiPath string + cli.checkVersion(ctx) if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") apiPath = path.Join(cli.basePath, "/v"+v, p) @@ -244,8 +297,8 @@ func (cli *Client) ClientVersion() string { // by the client, it uses the client's maximum version. // // If a manual override is in place, either through the "DOCKER_API_VERSION" -// (EnvOverrideAPIVersion) environment variable, or if the client is initialized -// with a fixed version (WithVersion(xx)), no negotiation is performed. +// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized +// with a fixed version ([WithVersion]), no negotiation is performed. // // If the API server's ping response does not contain an API version, or if the // client did not get a successful ping response, it assumes it is connected with @@ -265,8 +318,8 @@ func (cli *Client) NegotiateAPIVersion(ctx context.Context) { // version. // // If a manual override is in place, either through the "DOCKER_API_VERSION" -// (EnvOverrideAPIVersion) environment variable, or if the client is initialized -// with a fixed version (WithVersion(xx)), no negotiation is performed. +// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized +// with a fixed version ([WithVersion]), no negotiation is performed. 
// // If the API server's ping response does not contain an API version, we assume // we are connected with an old daemon without API version negotiation support, @@ -283,7 +336,7 @@ func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { // default to the latest version before versioning headers existed if pingResponse.APIVersion == "" { - pingResponse.APIVersion = "1.24" + pingResponse.APIVersion = fallbackAPIVersion } // if the client is not initialized with a version, start with the latest supported version @@ -338,17 +391,40 @@ func ParseHostURL(host string) (*url.URL, error) { }, nil } +func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) { + if cli.baseTransport == nil || cli.baseTransport.DialContext == nil { + return nil + } + + if cli.baseTransport.TLSClientConfig != nil { + // When using a tls config we don't use the configured dialer but instead a fallback dialer... + // Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn + // I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit. + return nil + } + return cli.baseTransport.DialContext +} + // Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, -// that can be used for proxying the daemon connection. +// that can be used for proxying the daemon connection. It is used by +// ["docker dial-stdio"]. // -// Used by `docker dial-stdio` (docker/cli#889). +// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014 func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { return func(ctx context.Context) (net.Conn, error) { - if transport, ok := cli.client.Transport.(*http.Transport); ok { - if transport.DialContext != nil && transport.TLSClientConfig == nil { - return transport.DialContext(ctx, cli.proto, cli.addr) + if dialFn := cli.dialerFromTransport(); dialFn != nil { + return dialFn(ctx, cli.proto, cli.addr) + } + switch cli.proto { + case "unix": + return net.Dial(cli.proto, cli.addr) + case "npipe": + return sockets.DialPipe(cli.addr, 32*time.Second) + default: + if tlsConfig := cli.tlsConfig(); tlsConfig != nil { + return tls.Dial(cli.proto, cli.addr, tlsConfig) } + return net.Dial(cli.proto, cli.addr) } - return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) } } diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 319b738d..9fe78ea4 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package client // import "github.com/docker/docker/client" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go index f6b1881f..3deb4a8e 100644 --- a/vendor/github.com/docker/docker/client/config_create.go +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -11,7 +11,7 @@ import ( // ConfigCreate creates a new config. 
func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { var response types.ConfigCreateResponse - if err := cli.NewVersionError("1.30", "config create"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil { return response, err } resp, err := cli.post(ctx, "/configs/create", nil, config, nil) diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go index 9be7882c..2c6c7cb3 100644 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -14,7 +14,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C if id == "" { return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} } - if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config inspect"); err != nil { return swarm.Config{}, nil, err } resp, err := cli.get(ctx, "/configs/"+id, nil, nil) diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go index 565acc6e..14dd3813 100644 --- a/vendor/github.com/docker/docker/client/config_list.go +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -12,7 +12,7 @@ import ( // ConfigList returns the list of configs. func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { - if err := cli.NewVersionError("1.30", "config list"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil { return nil, err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go index 24b94e9c..d05b0113 100644 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -4,7 +4,7 @@ import "context" // ConfigRemove removes a config. 
func (cli *Client) ConfigRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.30", "config remove"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config remove"); err != nil { return err } resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go index 1ac29854..6995861d 100644 --- a/vendor/github.com/docker/docker/client/config_update.go +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -9,7 +9,7 @@ import ( // ConfigUpdate attempts to update a config func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { - if err := cli.NewVersionError("1.30", "config update"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config update"); err != nil { return err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index ba92117d..6a32e5f6 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -2,9 +2,11 @@ package client // import "github.com/docker/docker/client" import ( "context" + "net/http" "net/url" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerAttach attaches a connection to a container in the server. @@ -31,7 +33,7 @@ import ( // // You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this // stream. -func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { +func (cli *Client) ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) { query := url.Values{} if options.Stream { query.Set("stream", "1") @@ -52,8 +54,7 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option query.Set("logs", "1") } - headers := map[string][]string{ + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, http.Header{ "Content-Type": {"text/plain"}, - } - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) + }) } diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index cd7f7634..26b3f091 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -6,12 +6,13 @@ import ( "errors" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerCommit applies changes to a container and creates a new tagged image. 
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { +func (cli *Client) ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) { var repository, tag string if options.Reference != "" { ref, err := reference.ParseNormalizedNamed(options.Reference) diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index 193a2bb5..409f5b49 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -23,10 +23,23 @@ type configWrapper struct { func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) { var response container.CreateResponse - if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + + if err := cli.NewVersionError(ctx, "1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + if err := cli.NewVersionError(ctx, "1.41", "specify container image platform"); platform != nil && err != nil { return response, err } - if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { + if err := cli.NewVersionError(ctx, "1.44", "specify health-check start interval"); config != nil && config.Healthcheck != nil && config.Healthcheck.StartInterval != 0 && err != nil { + return response, err + } + if err := cli.NewVersionError(ctx, "1.44", "specify mac-address per network"); hasEndpointSpecificMacAddress(networkingConfig) && err != nil { return response, err } @@ -45,6 +58,11 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config } } + // Since API 1.44, the container-wide MacAddress is deprecated and will trigger a WARNING if it's specified. + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.44") { + config.MacAddress = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44. + } + query := url.Values{} if p := formatPlatform(platform); p != "" { query.Set("platform", p) @@ -81,3 +99,16 @@ func formatPlatform(platform *ocispec.Platform) string { } return path.Join(platform.OS, platform.Architecture, platform.Variant) } + +// hasEndpointSpecificMacAddress checks whether one of the endpoint in networkingConfig has a MacAddress defined. 
+func hasEndpointSpecificMacAddress(networkingConfig *network.NetworkingConfig) bool { + if networkingConfig == nil { + return false + } + for _, endpoint := range networkingConfig.EndpointsConfig { + if endpoint.MacAddress != "" { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 6a2cb006..3fff0c82 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" @@ -12,7 +13,14 @@ import ( func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { var response types.IDResponse - if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + + if err := cli.NewVersionError(ctx, "1.25", "env"); len(config.Env) != 0 && err != nil { return response, err } if versions.LessThan(cli.ClientVersion(), "1.42") { @@ -46,10 +54,9 @@ func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, confi if versions.LessThan(cli.ClientVersion(), "1.42") { config.ConsoleSize = nil } - headers := map[string][]string{ + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, http.Header{ "Content-Type": {"application/json"}, - } - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) + }) } // ContainerExecInspect returns information about a specific exec process on the docker host. diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index bd491b3d..782e1b3c 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -7,11 +7,12 @@ import ( "strconv" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" ) // ContainerList returns the list of containers in the docker host. 
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { +func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) { query := url.Values{} if options.All { @@ -37,7 +38,6 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis if options.Filters.Len() > 0 { //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 9bdf2b0f..61197d84 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -6,7 +6,7 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" timetypes "github.com/docker/docker/api/types/time" "github.com/pkg/errors" ) @@ -33,7 +33,7 @@ import ( // // You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this // stream. -func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *Client) ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index 04383dea..ca509238 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { var report types.ContainersPruneReport - if err := cli.NewVersionError("1.25", "container prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index c21de609..39f7b106 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -4,11 +4,11 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerRemove kills and removes a container from the docker host. -func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { +func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error { query := url.Values{} if options.RemoveVolumes { query.Set("v", "1") diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go index a9d4c0c7..5cfd01d4 100644 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -5,16 +5,16 @@ import ( "net/url" "strconv" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerResize changes the size of the tty for a container. 
-func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options container.ResizeOptions) error { return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) } // ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { +func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error { return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) } diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 1e0ad999..825d3e4e 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -17,8 +17,16 @@ func (cli *Client) ContainerRestart(ctx context.Context, containerID string, opt if options.Timeout != nil { query.Set("t", strconv.Itoa(*options.Timeout)) } - if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) + if options.Signal != "" { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + if versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } } resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go index c2e0b15d..33ba85f2 100644 --- a/vendor/github.com/docker/docker/client/container_start.go +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -4,11 +4,11 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error { query := url.Values{} if len(options.CheckpointID) != 0 { query.Set("checkpoint", options.CheckpointID) diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 0a6488dd..3fabb75f 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -21,8 +21,10 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea return types.ContainerStats{}, err } - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err + return types.ContainerStats{ + Body: resp.body, + OSType: getDockerOS(resp.header.Get("Server")), + }, nil } // ContainerStatsOneShot gets a single stat entry from a container. 
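(Reviewer aside: the ContainerRestart hunk above and the matching ContainerStop hunk below now force API-version negotiation before deciding whether the `signal` query parameter may be sent; on daemons older than API 1.42 the signal is silently dropped. A hedged sketch of the caller-side pattern; the container name, signal, and timeout are arbitrary examples.)

```go
package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	timeout := 10 // seconds; honoured on all supported API versions
	err = cli.ContainerStop(context.Background(), "my-container", container.StopOptions{
		Signal:  "SIGUSR1", // only transmitted when the negotiated API is >= 1.42
		Timeout: &timeout,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```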
@@ -37,6 +39,8 @@ func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string return types.ContainerStats{}, err } - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err + return types.ContainerStats{ + Body: resp.body, + OSType: getDockerOS(resp.header.Get("Server")), + }, nil } diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index 2a43ce22..ac0cab69 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -21,8 +21,16 @@ func (cli *Client) ContainerStop(ctx context.Context, containerID string, option if options.Timeout != nil { query.Set("t", strconv.Itoa(*options.Timeout)) } - if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) + if options.Signal != "" { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + if versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } } resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 2375eb1e..b8d3bdef 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -30,6 +30,12 @@ const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ // synchronize ContainerWait with other calls, such as specifying a // "next-exit" condition before issuing a ContainerStart request. func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) if versions.LessThan(cli.ClientVersion(), "1.30") { return cli.legacyContainerWait(ctx, containerID) } @@ -66,8 +72,12 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit // // If there's a JSON parsing error, read the real error message // off the body and send it to the client. 
- _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) - errC <- errors.New(responseText.String()) + if errors.As(err, new(*json.SyntaxError)) { + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) + } else { + errC <- err + } return } diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index efab066d..68ef31b7 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "net/url" "github.com/docker/docker/api/types/registry" @@ -16,13 +17,13 @@ func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegist return distributionInspect, objectNotFoundError{object: "distribution", id: image} } - if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil { return distributionInspect, err } - var headers map[string][]string + var headers http.Header if encodedRegistryAuth != "" { - headers = map[string][]string{ + headers = http.Header{ registry.AuthHeader: {encodedRegistryAuth}, } } diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 6878144c..4b96b020 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" import ( + "context" "fmt" "github.com/docker/docker/api/types/versions" @@ -31,20 +32,10 @@ func ErrorConnectionFailed(host string) error { return errConnectionFailed{host: host} } -// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility -type notFound interface { - error - NotFound() bool -} - // IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. +// by the API when some object is not found. It is an alias for [errdefs.IsNotFound]. func IsErrNotFound(err error) bool { - if errdefs.IsNotFound(err) { - return true - } - var e notFound - return errors.As(err, &e) + return errdefs.IsNotFound(err) } type objectNotFoundError struct { @@ -58,9 +49,18 @@ func (e objectNotFoundError) Error() string { return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) } -// NewVersionError returns an error if the APIVersion required -// if less than the current supported version -func (cli *Client) NewVersionError(APIrequired, feature string) error { +// NewVersionError returns an error if the APIVersion required is less than the +// current supported version. +// +// It performs API-version negotiation if the Client is configured with this +// option, otherwise it assumes the latest API version is used. +func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature string) error { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
+ cli.checkVersion(ctx) if cli.version != "" && versions.LessThan(cli.version, APIrequired) { return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) } diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 7e84865f..839d4c5c 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -3,18 +3,16 @@ package client // import "github.com/docker/docker/client" import ( "bufio" "context" - "crypto/tls" "fmt" "net" "net/http" - "net/http/httputil" "net/url" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/sockets" "github.com/pkg/errors" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" ) // postHijacked sends a POST request and hijacks the connection. @@ -23,11 +21,11 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu if err != nil { return types.HijackedResponse{}, err } - req, err := cli.buildRequest(http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) + req, err := cli.buildRequest(ctx, http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) if err != nil { return types.HijackedResponse{}, err } - conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") + conn, mediaType, err := cli.setupHijackConn(req, "tcp") if err != nil { return types.HijackedResponse{}, err } @@ -37,29 +35,18 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu // DialHijack returns a hijacked connection with negotiated protocol proto. func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequest(http.MethodPost, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) if err != nil { return nil, err } req = cli.addHeaders(req, meta) - conn, _, err := cli.setupHijackConn(ctx, req, proto) + conn, _, err := cli.setupHijackConn(req, proto) return conn, err } -// fallbackDial is used when WithDialer() was not called. -// See cli.Dialer(). -func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - return tls.Dial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} - -func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { +func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { + ctx := req.Context() req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) @@ -68,6 +55,11 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto if err != nil { return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. 
Is 'docker daemon' running on this host?") } + defer func() { + if retErr != nil { + conn.Close() + } + }() // When we set up a TCP connection for hijack, there could be long periods // of inactivity (a long running command with no output) that in certain @@ -79,35 +71,29 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto _ = tcpConn.SetKeepAlivePeriod(30 * time.Second) } - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() + hc := &hijackedConn{conn, bufio.NewReader(conn)} // Server hijacks the connection, error 'connection closed' expected - resp, err := clientconn.Do(req) - - //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons - if err != httputil.ErrPersistEOF { - if err != nil { - return nil, "", err - } - if resp.StatusCode != http.StatusSwitchingProtocols { - _ = resp.Body.Close() - return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) - } + resp, err := otelhttp.NewTransport(hc).RoundTrip(req) + if err != nil { + return nil, "", err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + _ = resp.Body.Close() + return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) } - c, br := clientconn.Hijack() - if br.Buffered() > 0 { + if hc.r.Buffered() > 0 { // If there is buffered content, wrap the connection. We return an // object that implements CloseWrite if the underlying connection // implements it. - if _, ok := c.(types.CloseWriter); ok { - c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + if _, ok := hc.Conn.(types.CloseWriter); ok { + conn = &hijackedConnCloseWriter{hc} } else { - c = &hijackedConn{c, br} + conn = hc } } else { - br.Reset(nil) + hc.r.Reset(nil) } var mediaType string @@ -116,7 +102,7 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto mediaType = resp.Header.Get("Content-Type") } - return c, mediaType, nil + return conn, mediaType, nil } // hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case @@ -128,6 +114,13 @@ type hijackedConn struct { r *bufio.Reader } +func (c *hijackedConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := req.Write(c.Conn); err != nil { + return nil, err + } + return http.ReadResponse(c.r, req) +} + func (c *hijackedConn) Read(b []byte) (int, error) { return c.r.Read(b) } diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index d16e1d8e..d294ddc8 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -18,18 +18,18 @@ import ( // The Body in the response implements an io.ReadCloser and it's up to the caller to // close it. 
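(Reviewer aside: with httputil.ClientConn removed, the hijack path above performs the HTTP/1.1 upgrade itself — hijackedConn now implements http.RoundTripper and the upgrade request is traced via otelhttp. From the caller's side nothing changes except the option type; a sketch of demultiplexing an attached stream with stdcopy, as the AttachOptions doc comment in this diff suggests; the container name is hypothetical.)

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// types.ContainerAttachOptions became container.AttachOptions.
	resp, err := cli.ContainerAttach(context.Background(), "my-container", container.AttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Close()

	// For non-TTY containers the hijacked stream multiplexes stdout and
	// stderr; stdcopy splits them back apart.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
		log.Fatal(err)
	}
}
```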
func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := cli.imageBuildOptionsToQuery(options) + query, err := cli.imageBuildOptionsToQuery(ctx, options) if err != nil { return types.ImageBuildResponse{}, err } - headers := http.Header(make(map[string][]string)) buf, err := json.Marshal(options.AuthConfigs) if err != nil { return types.ImageBuildResponse{}, err } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers := http.Header{} + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) headers.Set("Content-Type", "application/x-tar") serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) @@ -37,15 +37,13 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio return types.ImageBuildResponse{}, err } - osType := getDockerOS(serverResp.header.Get("Server")) - return types.ImageBuildResponse{ Body: serverResp.body, - OSType: osType, + OSType: getDockerOS(serverResp.header.Get("Server")), }, nil } -func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { +func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.ImageBuildOptions) (url.Values, error) { query := url.Values{ "t": options.Tags, "securityopt": options.SecurityOpt, @@ -75,7 +73,7 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur } if options.Squash { - if err := cli.NewVersionError("1.25", "squash"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "squash"); err != nil { return query, err } query.Set("squash", "1") @@ -125,7 +123,7 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur query.Set("session", options.SessionID) } if options.Platform != "" { - if err := cli.NewVersionError("1.32", "platform"); err != nil { + if err := cli.NewVersionError(ctx, "1.32", "platform"); err != nil { return query, err } query.Set("platform", strings.ToLower(options.Platform)) diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index 6a9b708f..29cd0b43 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -3,10 +3,11 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" "net/url" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" ) @@ -33,6 +34,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti } func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) + return cli.post(ctx, "/images/create", query, nil, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index c5de42cb..cd376a14 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -6,7 +6,7 @@ import ( "net/url" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" 
"github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index 950d5133..f3f2280e 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -7,12 +7,20 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/versions" ) // ImageList returns a list of images in the docker host. -func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { - var images []types.ImageSummary +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + + var images []image.Summary query := url.Values{} optionFilters := options.Filters diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go index 91016e49..c825206e 100644 --- a/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" "net/url" "github.com/docker/docker/api/types" @@ -17,8 +18,9 @@ func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) ( if quiet { v.Set("quiet", "1") } - headers := map[string][]string{"Content-Type": {"application/x-tar"}} - resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + resp, err := cli.postRaw(ctx, "/images/load", v, input, http.Header{ + "Content-Type": {"application/x-tar"}, + }) if err != nil { return types.ImageLoadResponse{}, err } diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 56af6d7f..6b82d6ab 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { var report types.ImagesPruneReport - if err := cli.NewVersionError("1.25", "image prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "image prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index a2397559..d92049d5 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -6,7 +6,7 @@ import ( "net/url" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/errdefs" ) diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index dd1b8f34..6839a89e 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -4,9 +4,10 @@ import ( "context" "errors" "io" + "net/http" "net/url" - "github.com/docker/distribution/reference" + 
"github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" @@ -50,6 +51,7 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options types.Im } func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 6a9fb3f4..b936d208 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -6,10 +6,11 @@ import ( "net/url" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" ) // ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) { query := url.Values{} if options.Force { @@ -19,7 +20,7 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type query.Set("noprune", "1") } - var dels []types.ImageDeleteResponseItem + var dels []image.DeleteResponse resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 5f0c49ed..8971b139 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "net/url" "strconv" @@ -48,6 +49,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I } func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.get(ctx, "/images/search", query, headers) + return cli.get(ctx, "/images/search", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go index 5652bfc2..ea6b4a1e 100644 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -4,7 +4,7 @@ import ( "context" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/pkg/errors" ) diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go index c856704e..cc3fcc46 100644 --- a/vendor/github.com/docker/docker/client/info.go +++ b/vendor/github.com/docker/docker/client/info.go @@ -6,12 +6,12 @@ import ( "fmt" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/system" ) // Info returns information about the docker server. 
-func (cli *Client) Info(ctx context.Context) (types.Info, error) { - var info types.Info +func (cli *Client) Info(ctx context.Context) (system.Info, error) { + var info system.Info serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) defer ensureReaderClosed(serverResp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 7993c5a4..302f5fb1 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/system" "github.com/docker/docker/api/types/volume" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -45,30 +46,30 @@ type CommonAPIClient interface { // ContainerAPIClient defines API client methods for the containers type ContainerAPIClient interface { - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx 
context.Context, container string, options types.ResizeOptions) error + ContainerResize(ctx context.Context, container string, options container.ResizeOptions) error ContainerRestart(ctx context.Context, container string, options container.StopOptions) error ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) - ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStart(ctx context.Context, container string, options container.StartOptions) error ContainerStop(ctx context.Context, container string, options container.StopOptions) error ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error @@ -93,11 +94,11 @@ type ImageAPIClient interface { ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error @@ -140,13 +141,13 @@ type PluginAPIClient interface { // ServiceAPIClient defines API client methods for the services type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) - ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options 
types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) } @@ -165,7 +166,7 @@ type SwarmAPIClient interface { // SystemAPIClient defines API client methods for the system type SystemAPIClient interface { Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) - Info(ctx context.Context) (types.Info, error) + Info(ctx context.Context) (system.Info, error) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) Ping(ctx context.Context) (types.Ping, error) diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go index 402ffb51..c585c104 100644 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -3,7 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) type apiClientExperimental interface { @@ -12,7 +12,7 @@ type apiClientExperimental interface { // CheckpointAPIClient defines API client methods for the checkpoints type CheckpointAPIClient interface { - CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error - CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error - CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) + CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error + CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error + CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) } diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index 278d9383..668e87d6 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -5,14 +5,26 @@ import ( "encoding/json" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" ) // NetworkCreate creates a new network in the docker host. func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + networkCreateRequest := types.NetworkCreateRequest{ NetworkCreate: options, Name: name, } + if versions.LessThan(cli.version, "1.44") { + networkCreateRequest.CheckDuplicate = true //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. 
+ } + var response types.NetworkCreateResponse serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go index cebb1882..7b5f831e 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { var report types.NetworksPruneReport - if err := cli.NewVersionError("1.25", "network prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "network prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go index c212906b..1a9e6bfb 100644 --- a/vendor/github.com/docker/docker/client/node_list.go +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -16,7 +16,6 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) if options.Filters.Len() > 0 { filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index 099ad418..ddb0ca39 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -11,25 +11,25 @@ import ( "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" + "go.opentelemetry.io/otel/trace" ) -// Opt is a configuration option to initialize a client +// Opt is a configuration option to initialize a [Client]. type Opt func(*Client) error -// FromEnv configures the client with values from environment variables. +// FromEnv configures the client with values from environment variables. It +// is the equivalent of using the [WithTLSClientConfigFromEnv], [WithHostFromEnv], +// and [WithVersionFromEnv] options. // // FromEnv uses the following environment variables: // -// DOCKER_HOST (EnvOverrideHost) to set the URL to the docker server. -// -// DOCKER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to -// use, leave empty for latest. -// -// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to -// load the TLS certificates (ca.pem, cert.pem, key.pem). -// -// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by -// default). +// - DOCKER_HOST ([EnvOverrideHost]) to set the URL to the docker server. +// - DOCKER_API_VERSION ([EnvOverrideAPIVersion]) to set the version of the +// API to use, leave empty for latest. +// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from +// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem"). +// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification +// (off by default). func FromEnv(c *Client) error { ops := []Opt{ WithTLSClientConfigFromEnv(), @@ -45,7 +45,8 @@ func FromEnv(c *Client) error { } // WithDialContext applies the dialer to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. +// used to set the Timeout and KeepAlive settings of the client. It returns +// an error if the client does not have a [http.Transport] configured.
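For reviewers who want to see the rewritten FromEnv contract end to end, here is a minimal consumer sketch (not part of the vendored change; it uses only the public client package):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(
		client.FromEnv,                     // DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_TLS_VERIFY
		client.WithAPIVersionNegotiation(), // negotiated once, on the first request
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("daemon API version:", ping.APIVersion)
	fmt.Println("client speaks:", cli.ClientVersion())
}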
func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { return func(c *Client) error { if transport, ok := c.client.Transport.(*http.Transport); ok { @@ -75,7 +76,7 @@ func WithHost(host string) Opt { } // WithHostFromEnv overrides the client host with the host specified in the -// DOCKER_HOST (EnvOverrideHost) environment variable. If DOCKER_HOST is not set, +// DOCKER_HOST ([EnvOverrideHost]) environment variable. If DOCKER_HOST is not set, // or set to an empty value, the host is not modified. func WithHostFromEnv() Opt { return func(c *Client) error { @@ -86,7 +87,7 @@ func WithHostFromEnv() Opt { } } -// WithHTTPClient overrides the client http client with the specified one +// WithHTTPClient overrides the client's HTTP client with the specified one. func WithHTTPClient(client *http.Client) Opt { return func(c *Client) error { if client != nil { @@ -96,7 +97,7 @@ func WithHTTPClient(client *http.Client) Opt { } } -// WithTimeout configures the time limit for requests made by the HTTP client +// WithTimeout configures the time limit for requests made by the HTTP client. func WithTimeout(timeout time.Duration) Opt { return func(c *Client) error { c.client.Timeout = timeout @@ -104,7 +105,19 @@ func WithTimeout(timeout time.Duration) Opt { } } -// WithHTTPHeaders overrides the client default http headers +// WithUserAgent configures the User-Agent header to use for HTTP requests. +// It overrides any User-Agent set in headers. When set to an empty string, +// the User-Agent header is removed, and no header is sent. +func WithUserAgent(ua string) Opt { + return func(c *Client) error { + c.userAgent = &ua + return nil + } +} + +// WithHTTPHeaders appends custom HTTP headers to the client's default headers. +// It does not allow for built-in headers (such as "User-Agent", if set) to +// be overridden. Also see [WithUserAgent]. func WithHTTPHeaders(headers map[string]string) Opt { return func(c *Client) error { c.customHTTPHeaders = headers @@ -112,7 +125,7 @@ func WithHTTPHeaders(headers map[string]string) Opt { } } -// WithScheme overrides the client scheme with the specified one +// WithScheme overrides the client scheme with the specified one. func WithScheme(scheme string) Opt { return func(c *Client) error { c.scheme = scheme @@ -120,51 +133,50 @@ func WithScheme(scheme string) Opt { } } -// WithTLSClientConfig applies a tls config to the client transport. +// WithTLSClientConfig applies a TLS config to the client transport. func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { return func(c *Client) error { - opts := tlsconfig.Options{ + transport, ok := c.client.Transport.(*http.Transport) + if !ok { + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } + config, err := tlsconfig.Client(tlsconfig.Options{ CAFile: cacertPath, CertFile: certPath, KeyFile: keyPath, ExclusiveRootPools: true, - } - config, err := tlsconfig.Client(opts) + }) if err != nil { return errors.Wrap(err, "failed to create tls config") } - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.TLSClientConfig = config - return nil - } - return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + transport.TLSClientConfig = config + return nil } } // WithTLSClientConfigFromEnv configures the client's TLS settings with the -// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables. 
-// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified. +// settings in the DOCKER_CERT_PATH ([EnvOverrideCertPath]) and DOCKER_TLS_VERIFY +// ([EnvTLSVerify]) environment variables. If DOCKER_CERT_PATH is not set or empty, +// TLS configuration is not modified. // // WithTLSClientConfigFromEnv uses the following environment variables: // -// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to -// load the TLS certificates (ca.pem, cert.pem, key.pem). -// -// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by -// default). +// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from +// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem"). +// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification +// (off by default). func WithTLSClientConfigFromEnv() Opt { return func(c *Client) error { dockerCertPath := os.Getenv(EnvOverrideCertPath) if dockerCertPath == "" { return nil } - options := tlsconfig.Options{ + tlsc, err := tlsconfig.Client(tlsconfig.Options{ CAFile: filepath.Join(dockerCertPath, "ca.pem"), CertFile: filepath.Join(dockerCertPath, "cert.pem"), KeyFile: filepath.Join(dockerCertPath, "key.pem"), InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", - } - tlsc, err := tlsconfig.Client(options) + }) if err != nil { return err } @@ -178,7 +190,8 @@ func WithTLSClientConfigFromEnv() Opt { } // WithVersion overrides the client version with the specified one. If an empty -// version is specified, the value will be ignored to allow version negotiation. +// version is provided, the value is ignored to allow version negotiation +// (see [WithAPIVersionNegotiation]). func WithVersion(version string) Opt { return func(c *Client) error { if version != "" { @@ -190,8 +203,9 @@ func WithVersion(version string) Opt { } // WithVersionFromEnv overrides the client version with the version specified in -// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set, -// the version is not modified. +// the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable. +// If DOCKER_API_VERSION is not set, or set to an empty value, the version +// is not modified. func WithVersionFromEnv() Opt { return func(c *Client) error { return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) @@ -201,10 +215,19 @@ func WithVersionFromEnv() Opt { // WithAPIVersionNegotiation enables automatic API version negotiation for the client. // With this option enabled, the client automatically negotiates the API version // to use when making requests. API version negotiation is performed on the first -// request; subsequent requests will not re-negotiate. +// request; subsequent requests do not re-negotiate. func WithAPIVersionNegotiation() Opt { return func(c *Client) error { c.negotiateVersion = true return nil } } + +// WithTraceProvider sets the trace provider for the client. +// If this is not set then the global trace provider will be used. 
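The new WithUserAgent option interacts with WithHTTPHeaders exactly as the doc comments above describe: the dedicated option wins over any User-Agent in the header map, and an empty string removes the header. A sketch; the header name and user-agent string below are made-up examples:

package main

import "github.com/docker/docker/client"

func newExporterClient() (*client.Client, error) {
	return client.NewClientWithOpts(
		client.FromEnv,
		// Extra headers may carry a User-Agent, but WithUserAgent supersedes it;
		// WithUserAgent("") would strip the header entirely.
		client.WithHTTPHeaders(map[string]string{"X-Example-Header": "1"}),
		client.WithUserAgent("prometheus-podman-exporter/example"),
	)
}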
+func WithTraceProvider(provider trace.TracerProvider) Opt { + return func(c *Client) error { + c.tp = provider + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index 347ae71e..dfd1042f 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -21,11 +21,11 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() // because ping requests are used during API version negotiation, so we want // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping - req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } - serverResp, err := cli.doRequest(ctx, req) + serverResp, err := cli.doRequest(req) if err == nil { defer ensureReaderClosed(serverResp) switch serverResp.statusCode { @@ -37,11 +37,9 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { return ping, err } - req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) - if err != nil { - return ping, err - } - serverResp, err = cli.doRequest(ctx, req) + // HEAD failed; fallback to GET. + req.Method = http.MethodGet + serverResp, err = cli.doRequest(req) defer ensureReaderClosed(serverResp) if err != nil { return ping, err diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index 3a740ec4..69184619 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -4,9 +4,10 @@ import ( "context" "encoding/json" "io" + "net/http" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" @@ -68,13 +69,15 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types } func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.get(ctx, "/plugins/privileges", query, headers) + return cli.get(ctx, "/plugins/privileges", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/plugins/pull", query, privileges, headers) + return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go index 18f9754c..8f68a86e 100644 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -3,14 +3,16 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" 
"github.com/docker/docker/api/types/registry" ) // PluginPush pushes a plugin to a registry func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ + registry.AuthHeader: {registryAuth}, + }) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go index 995d1fd2..5cade450 100644 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -3,9 +3,10 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/pkg/errors" @@ -13,7 +14,7 @@ import ( // PluginUpgrade upgrades a plugin func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + if err := cli.NewVersionError(ctx, "1.26", "plugin upgrade"); err != nil { return nil, err } query := url.Values{} @@ -35,6 +36,7 @@ func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types } func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index bcedcf3b..efe07bb9 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "os" + "reflect" "strings" "github.com/docker/docker/api/types" @@ -27,17 +28,17 @@ type serverResponse struct { } // head sends an http request to the docker API using the method HEAD. -func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) } // get sends an http request to the docker API using the method GET with a specific Go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) } // post sends an http request to the docker API using the method POST with a specific Go context. 
-func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { body, headers, err := encodeBody(obj, headers) if err != nil { return serverResponse{}, err @@ -45,34 +46,44 @@ func (cli *Client) post(ctx context.Context, path string, query url.Values, obj return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { body, headers, err := encodeBody(obj, headers) if err != nil { return serverResponse{}, err } - return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) + return cli.putRaw(ctx, path, query, body, headers) } // putRaw sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { + // PUT requests are expected to always have a body (apparently) + // so explicitly pass an empty body to sendRequest to signal that + // it should set the Content-Type header if not already present. + if body == nil { + body = http.NoBody + } return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) } // delete sends an http request to the docker API using the method DELETE. -func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) } -type headers map[string][]string - -func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { +func encodeBody(obj interface{}, headers http.Header) (io.Reader, http.Header, error) { if obj == nil { return nil, headers, nil } + // encoding/json encodes a nil pointer as the JSON document `null`, + // irrespective of whether the type implements json.Marshaler or encoding.TextMarshaler. + // That is almost certainly not what the caller intended as the request body. 
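The guard added here exists because encoding/json serializes a typed nil pointer as the JSON document null, regardless of any Marshaler implementation. A standalone demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

type createRequest struct{ Name string }

func main() {
	var req *createRequest // typed nil pointer
	b, err := json.Marshal(req)
	fmt.Println(string(b), err) // prints: null <nil>
}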
+ if reflect.TypeOf(obj).Kind() == reflect.Ptr && reflect.ValueOf(obj).IsNil() { + return nil, headers, nil + } body, err := encodeData(obj) if err != nil { @@ -85,13 +96,8 @@ func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { return body, headers, nil } -func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { - expectedPayload := (method == http.MethodPost || method == http.MethodPut) - if expectedPayload && body == nil { - body = bytes.NewReader([]byte{}) - } - - req, err := http.NewRequest(method, path, body) +func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, method, path, body) if err != nil { return nil, err } @@ -104,19 +110,19 @@ func (cli *Client) buildRequest(method, path string, body io.Reader, headers hea req.Host = DummyHost } - if expectedPayload && req.Header.Get("Content-Type") == "" { + if body != nil && req.Header.Get("Content-Type") == "" { req.Header.Set("Content-Type", "text/plain") } return req, nil } -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { - req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { + req, err := cli.buildRequest(ctx, method, cli.getAPIPath(ctx, path, query), body, headers) if err != nil { return serverResponse{}, err } - resp, err := cli.doRequest(ctx, req) + resp, err := cli.doRequest(req) switch { case errors.Is(err, context.Canceled): return serverResponse{}, errdefs.Cancelled(err) @@ -128,10 +134,9 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u return resp, errdefs.FromStatusCode(err, resp.statusCode) } -func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { +func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - req = req.WithContext(ctx) resp, err := cli.client.Do(req) if err != nil { if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { @@ -148,19 +153,19 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp return serverResp, err } - if nErr, ok := err.(*url.Error); ok { - if nErr, ok := nErr.Err.(*net.OpError); ok { + if uErr, ok := err.(*url.Error); ok { + if nErr, ok := uErr.Err.(*net.OpError); ok { if os.IsPermission(nErr.Err) { return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) } } } - if err, ok := err.(net.Error); ok { - if err.Timeout() { + if nErr, ok := err.(net.Error); ok { + if nErr.Timeout() { return serverResp, ErrorConnectionFailed(cli.host) } - if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") { return serverResp, ErrorConnectionFailed(cli.host) } } @@ -221,26 +226,20 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", 
http.StatusText(serverResp.statusCode), serverResp.reqURL) } - var ct string - if serverResp.header != nil { - ct = serverResp.header.Get("Content-Type") - } - - var errorMessage string - if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var daemonErr error + if serverResp.header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) { var errorResponse types.ErrorResponse if err := json.Unmarshal(body, &errorResponse); err != nil { return errors.Wrap(err, "Error reading JSON") } - errorMessage = strings.TrimSpace(errorResponse.Message) + daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) } else { - errorMessage = strings.TrimSpace(string(body)) + daemonErr = errors.New(strings.TrimSpace(string(body))) } - - return errors.Wrap(errors.New(errorMessage), "Error response from daemon") + return errors.Wrap(daemonErr, "Error response from daemon") } -func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { +func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request { // Add CLI Config's HTTP Headers BEFORE we set the Docker headers // then the user can't change OUR headers for k, v := range cli.customHTTPHeaders { @@ -253,6 +252,14 @@ func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request for k, v := range headers { req.Header[http.CanonicalHeaderKey(k)] = v } + + if cli.userAgent != nil { + if *cli.userAgent == "" { + req.Header.Del("User-Agent") + } else { + req.Header.Set("User-Agent", *cli.userAgent) + } + } return req } diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index c65d38a1..7b7f1ba7 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -11,7 +11,7 @@ import ( // SecretCreate creates a new secret. func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { var response types.SecretCreateResponse - if err := cli.NewVersionError("1.25", "secret create"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret create"); err != nil { return response, err } resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index 5906874b..a9cb5988 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -11,7 +11,7 @@ import ( // SecretInspectWithRaw returns the secret information with raw data func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret inspect"); err != nil { return swarm.Secret{}, nil, err } if id == "" { diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go index a0289c9f..4d21639e 100644 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -12,7 +12,7 @@ import ( // SecretList returns the list of secrets. 
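Caller-visible behavior is unchanged by the checkResponseErr cleanup: the daemon's message is still wrapped as "Error response from daemon", and errdefs still classifies errors from the HTTP status code. A usage sketch, assuming a reachable daemon; the secret ID is a placeholder:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	if _, _, err := cli.SecretInspectWithRaw(context.Background(), "no-such-secret"); err != nil {
		fmt.Println(errdefs.IsNotFound(err)) // classified from the 404 status
		fmt.Println(err)                     // "Error response from daemon: ..." with the body's message
	}
}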
func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if err := cli.NewVersionError("1.25", "secret list"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret list"); err != nil { return nil, err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index f47f68b6..079ed673 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -4,7 +4,7 @@ import "context" // SecretRemove removes a secret. func (cli *Client) SecretRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret remove"); err != nil { return err } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index 2e939e8c..9dfe6719 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -9,7 +9,7 @@ import ( // SecretUpdate attempts to update a secret. func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { - if err := cli.NewVersionError("1.25", "secret update"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret update"); err != nil { return err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index b6065b8e..2ebb5ee3 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -4,26 +4,28 @@ import ( "context" "encoding/json" "fmt" + "net/http" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) // ServiceCreate creates a new service. -func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var response types.ServiceCreateResponse - headers := map[string][]string{ - "version": {cli.version}, - } +func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) { + var response swarm.ServiceCreateResponse - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
+ cli.checkVersion(ctx) // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { @@ -53,6 +55,16 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, } } + headers := http.Header{} + if versions.LessThan(cli.version, "1.30") { + // the custom "version" header was used by engine API before 20.10 + // (API 1.30) to switch between client- and server-side lookup of + // image digests. + headers["version"] = []string{cli.version} + } + if options.EncodedRegistryAuth != "" { + headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} + } resp, err := cli.post(ctx, "/services/create", nil, service, headers) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go index 906fd405..e9e30a2a 100644 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -6,14 +6,14 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" timetypes "github.com/docker/docker/api/types/time" "github.com/pkg/errors" ) // ServiceLogs returns the logs generated by a service in an io.ReadCloser. // It's up to the caller to close the stream. -func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index ff8cded8..e05eebf5 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -3,30 +3,31 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" ) // ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. // It should be the value as set *before* the update. You can find this value in the Meta field // of swarm.Service, which can be found using ServiceInspectWithRaw. -func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
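The early checkVersion call matters because an un-negotiated client still has an empty version string, and the versions helpers treat missing components as zero, so the empty string satisfies LessThan against any real version and the legacy header would always be sent. Illustrative values:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	fmt.Println(versions.LessThan("", "1.30"))     // true: "" compares as 0
	fmt.Println(versions.LessThan("1.24", "1.30")) // true
	fmt.Println(versions.LessThan("1.44", "1.30")) // false
}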
+ cli.checkVersion(ctx) + var ( query = url.Values{} - response = types.ServiceUpdateResponse{} + response = swarm.ServiceUpdateResponse{} ) - headers := map[string][]string{ - "version": {cli.version}, - } - - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } - if options.RegistryAuthFrom != "" { query.Set("registryAuthFrom", options.RegistryAuthFrom) } @@ -60,6 +61,16 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version } } + headers := http.Header{} + if versions.LessThan(cli.version, "1.30") { + // the custom "version" header was used by engine API before 20.10 + // (API 1.30) to switch between client- and server-side lookup of + // image digests. + headers["version"] = []string{cli.version} + } + if options.EncodedRegistryAuth != "" { + headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} + } resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go index 6222fab5..b8c20e71 100644 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -6,13 +6,13 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" timetypes "github.com/docker/docker/api/types/time" ) // TaskLogs returns the logs generated by a task in an io.ReadCloser. // It's up to the caller to close the stream. -func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *Client) TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go deleted file mode 100644 index 55413443..00000000 --- a/vendor/github.com/docker/docker/client/transport.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "crypto/tls" - "net/http" -) - -// resolveTLSConfig attempts to resolve the TLS configuration from the -// RoundTripper. 
-func resolveTLSConfig(transport http.RoundTripper) *tls.Config { - switch tr := transport.(type) { - case *http.Transport: - return tr.TLSClientConfig - default: - return nil - } -} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index 6e324708..9333f6ee 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { var report types.VolumesPruneReport - if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "volume prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index 1f264383..31e08cb9 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -10,8 +10,14 @@ import ( // VolumeRemove removes a volume from the docker host. func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { query := url.Values{} - if versions.GreaterThanOrEqualTo(cli.version, "1.25") { - if force { + if force { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { query.Set("force", "1") } } diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go index 33bd31e5..151863f0 100644 --- a/vendor/github.com/docker/docker/client/volume_update.go +++ b/vendor/github.com/docker/docker/client/volume_update.go @@ -11,7 +11,7 @@ import ( // VolumeUpdate updates a volume. This only works for Cluster Volumes, and // only some fields can be updated. 
func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { - if err := cli.NewVersionError("1.42", "volume update"); err != nil { + if err := cli.NewVersionError(ctx, "1.42", "volume update"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go index 61e7456b..a5523c3e 100644 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs // ErrNotFound signals that the requested object doesn't exist type ErrNotFound interface { diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go index fe06fb6f..042de4b7 100644 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs import "context" diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index 77bda389..ebcd7893 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs import ( "net/http" diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go index 3abf07d0..f94034cb 100644 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -1,9 +1,18 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs + +import ( + "context" + "errors" +) type causer interface { Cause() error } +type wrapErr interface { + Unwrap() error +} + func getImplementer(err error) error { switch e := err.(type) { case @@ -23,6 +32,8 @@ func getImplementer(err error) error { return err case causer: return getImplementer(e.Cause()) + case wrapErr: + return getImplementer(e.Unwrap()) default: return err } @@ -105,3 +116,8 @@ func IsDataLoss(err error) bool { _, ok := getImplementer(err).(ErrDataLoss) return ok } + +// IsContext returns if the passed in error is due to context cancellation or deadline exceeded. +func IsContext(err error) bool { + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go b/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go new file mode 100644 index 00000000..16726176 --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go @@ -0,0 +1,54 @@ +package v1 + +import ( + "time" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +const DockerOCIImageMediaType = "application/vnd.docker.container.image.v1+json" + +// DockerOCIImage is a ocispec.Image extended with Docker specific Config. +type DockerOCIImage struct { + ocispec.Image + + // Shadow ocispec.Image.Config + Config DockerOCIImageConfig `json:"config,omitempty"` +} + +// DockerOCIImageConfig is a ocispec.ImageConfig extended with Docker specific fields. +type DockerOCIImageConfig struct { + ocispec.ImageConfig + + DockerOCIImageConfigExt +} + +// DockerOCIImageConfigExt contains Docker-specific fields in DockerImageConfig. 
+type DockerOCIImageConfigExt struct { + Healthcheck *HealthcheckConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + + OnBuild []string `json:",omitempty"` // ONBUILD metadata that were defined on the image Dockerfile + Shell []string `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// HealthcheckConfig holds configuration settings for the HEALTHCHECK feature. +type HealthcheckConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + StartInterval time.Duration `json:",omitempty"` // The interval to attempt healthchecks at during the start period + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go new file mode 100644 index 00000000..cf4d6a59 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/multierror/multierror.go @@ -0,0 +1,46 @@ +package multierror + +import ( + "strings" +) + +// Join is a drop-in replacement for errors.Join with better formatting. 
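Based on the implementation that follows, the formatting difference from errors.Join looks like the sketch below. Note the package is internal to the docker module, so the import resolves only from inside that module; this is illustrative only:

package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/internal/multierror" // internal; compiles only inside the docker module
)

func main() {
	err := multierror.Join(
		errors.New("first failure"),
		errors.New("second failure\nsecond line"),
	)
	fmt.Println(err)
	// * first failure
	// * second failure
	// 	second line
}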
+func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + if len(e.errs) == 1 { + return strings.TrimSpace(e.errs[0].Error()) + } + stringErrs := make([]string, 0, len(e.errs)) + for _, subErr := range e.errs { + stringErrs = append(stringErrs, strings.Replace(subErr.Error(), "\n", "\n\t", -1)) + } + return "* " + strings.Join(stringErrs, "\n* ") +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 34361a24..43133a09 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -21,6 +21,7 @@ import ( "time" "github.com/containerd/containerd/pkg/userns" + "github.com/containerd/log" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" @@ -29,7 +30,6 @@ import ( "github.com/moby/patternmatcher" "github.com/moby/sys/sequential" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a @@ -42,7 +42,7 @@ import ( // This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is // subject to change in Moby at any time -- image authors who require consistent or known directory permissions // should explicitly control them by ensuring that header entries exist for any applicable path. -const ImpliedDirectoryMode = 0755 +const ImpliedDirectoryMode = 0o755 type ( // Compression is the state represents if compressed or not. @@ -70,6 +70,12 @@ type ( // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool + // Allow unpacking to succeed in spite of failures to set extended + // attributes on the unpacked files due to the destination filesystem + // not supporting them or a lack of permissions. Extended attributes + // were probably in the archive for a reason, so set this option at + // your own peril. 
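A hypothetical call site for the new flag (the tar path and destination directory are placeholders, not part of this diff):

package main

import (
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("layer.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := archive.Untar(f, "/tmp/dest", &archive.TarOptions{
		BestEffortXattrs: true, // warn on ENOTSUP/EPERM xattr failures instead of aborting
	}); err != nil {
		panic(err)
	}
}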
+ BestEffortXattrs bool } ) @@ -199,21 +205,21 @@ func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { - logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") + log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { - logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) + log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { - logrus.Debugf("unpigz binary not found, falling back to go gzip library") + log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } - logrus.Debugf("Using %s to decompress", unpigzPath) + log.G(ctx).Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } @@ -475,6 +481,8 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro return hdr, nil } +const paxSchilyXattr = "SCHILY.xattr." + // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { @@ -487,15 +495,16 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { - length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 - length = xattrCapsSz2 + capability = capability[:xattrCapsSz2] + } + if hdr.PAXRecords == nil { + hdr.PAXRecords = make(map[string]string) } - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability[:length]) + hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability) } return nil } @@ -666,7 +675,19 @@ func (ta *tarAppender) addTarFile(path, name string) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, opts *TarOptions) error { + var ( + Lchown = true + inUserns, bestEffortXattrs bool + chownOpts *idtools.Identity + ) + if opts != nil { + Lchown = !opts.NoLchown + inUserns = opts.InUserNS + chownOpts = opts.ChownOpts + bestEffortXattrs = opts.BestEffortXattrs + } + // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) @@ -736,7 +757,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") + log.G(context.TODO()).Debug("PAX Global Extended Headers found and ignored") return nil default: @@ -757,26 +778,26 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } } - var errors []string - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP || err == syscall.EPERM { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. + var xattrErrs []string + for key, value := range hdr.PAXRecords { + xattr, ok := strings.CutPrefix(key, paxSchilyXattr) + if !ok { + continue + } + if err := system.Lsetxattr(path, xattr, []byte(value), 0); err != nil { + if bestEffortXattrs && errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.EPERM) { // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). - errors = append(errors, err.Error()) + xattrErrs = append(xattrErrs, err.Error()) continue } return err } } - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, + if len(xattrErrs) > 0 { + log.G(context.TODO()).WithFields(log.Fields{ + "errors": xattrErrs, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } @@ -893,13 +914,13 @@ func (t *Tarballer) Do() { defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) + log.G(context.TODO()).Errorf("Can't close tar writer: %s", err) } if err := t.compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) + log.G(context.TODO()).Errorf("Can't close compress writer: %s", err) } if err := t.pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) + log.G(context.TODO()).Errorf("Can't close pipe writer: %s", err) } }() @@ -922,7 +943,7 @@ func (t *Tarballer) Do() { // directory. So, we must split the source path and use the // basename as the include. 
if len(t.options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") + log.G(context.TODO()).Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(t.srcPath) @@ -947,7 +968,7 @@ func (t *Tarballer) Do() { walkRoot := getWalkRoot(t.srcPath, include) filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error { if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) + log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) return nil } @@ -986,7 +1007,7 @@ func (t *Tarballer) Do() { skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) } if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) + log.G(context.TODO()).Errorf("Error matching %s: %v", relFilePath, err) return err } @@ -1047,7 +1068,7 @@ func (t *Tarballer) Do() { } if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err @@ -1084,7 +1105,7 @@ loop: // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { - logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) + log.G(context.TODO()).Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } @@ -1158,7 +1179,7 @@ loop: } } - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + if err := createTarFile(path, dest, hdr, trBuf, options); err != nil { return err } @@ -1297,7 +1318,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) @@ -1322,7 +1343,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + if err := system.MkdirAll(filepath.Dir(dst), 0o700); err != nil { return err } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 76321a35..2c3786cd 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -21,8 +21,7 @@ func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConv return nil, nil } -type overlayWhiteoutConverter struct { -} +type overlayWhiteoutConverter struct{} func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { // convert whiteouts to AUFS format @@ -30,7 +29,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os // we just rename the file and make it normal dir, filename := filepath.Split(hdr.Name) hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 + hdr.Mode = 0o600 hdr.Typeflag = tar.TypeReg hdr.Size = 0 } @@ -42,9 +41,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr 
*tar.Header, path string, fi os return nil, err } if len(opaque) == 1 && opaque[0] == 'y' { - if hdr.Xattrs != nil { - delete(hdr.Xattrs, "trusted.overlay.opaque") - } + delete(hdr.PAXRecords, paxSchilyXattr+"trusted.overlay.opaque") // create a header for the whiteout file // it should inherit some properties from the parent, but be a regular file diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go index 28ae2769..3de1d64c 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index 92d8e23d..ff59d019 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive // import "github.com/docker/docker/pkg/archive" @@ -8,6 +7,7 @@ import ( "errors" "os" "path/filepath" + "runtime" "strings" "syscall" @@ -44,6 +44,20 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { // statUnix populates hdr from system-dependent fields of fi without performing // any OS lookups. func statUnix(fi os.FileInfo, hdr *tar.Header) error { + // Devmajor and Devminor are only needed for special devices. + + // In FreeBSD, RDev for regular files is -1 (unless overridden by FS): + // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531 + // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241). + + // ZFS in particular does not override the default: + // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027 + + // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1). + // Such large values cannot be encoded in a tar header. + if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar { + return nil + } s, ok := fi.Sys().(*syscall.Stat_t) if !ok { return nil @@ -83,7 +97,7 @@ func getFileUIDGID(stat interface{}) (idtools.Identity, error) { // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) + mode := uint32(hdr.Mode & 0o7777) switch hdr.Typeflag { case tar.TypeBlock: mode |= unix.S_IFBLK diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index 7f7242be..f9f16c92 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -3,6 +3,7 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" + "context" "fmt" "io" "os" @@ -12,10 +13,10 @@ import ( "syscall" "time" + "github.com/containerd/log" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // ChangeType represents the change type. 
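For context on the Xattrs-to-PAXRecords moves in these archive hunks: archive/tar deprecated Header.Xattrs in favor of PAXRecords keyed with the "SCHILY.xattr." prefix, which is what the paxSchilyXattr constant captures. A minimal construction, with made-up capability bytes:

package main

import (
	"archive/tar"
	"fmt"
)

func main() {
	capdata := []byte{0x01, 0x00, 0x00, 0x02} // stand-in for raw security.capability bytes
	hdr := &tar.Header{
		Name:     "usr/bin/ping",
		Typeflag: tar.TypeReg,
		Format:   tar.FormatPAX,
		PAXRecords: map[string]string{
			"SCHILY.xattr.security.capability": string(capdata),
		},
	}
	fmt.Println(hdr.PAXRecords)
}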
@@ -107,8 +108,10 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { return "", nil } -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) +type ( + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) +) func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { var ( @@ -341,9 +344,7 @@ func newRootFileInfo() *FileInfo { // ChangesDirs compares two directories and generates an array of Change objects describing the changes. // If oldDir is "", then all files in newDir will be Add-Changes. func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) + var oldRoot, newRoot *FileInfo if oldDir == "" { emptyDir, err := os.MkdirTemp("", "empty") if err != nil { @@ -371,7 +372,7 @@ func ChangesSize(newDir string, changes []Change) int64 { file := filepath.Join(newDir, change.Path) fileInfo, err := os.Lstat(file) if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) + log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err) continue } @@ -420,22 +421,22 @@ func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) ChangeTime: timestamp, } if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) + log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) + log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) + log.G(context.TODO()).Debugf("Can't close layer: %s", err) } if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) + log.G(context.TODO()).Debugf("failed close Changes writer: %s", err) } }() return reader, nil diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go index f8792b3d..81fcbc5b 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -267,7 +267,7 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) continue } bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) + name := string(bytes[0:clen(bytes[:])]) if name == "." || name == ".." 
{ // Useless names continue } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go index 833798bd..13a7d3c0 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go index 54aace97..853c73ee 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index 0ea15962..01eadc30 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -2,14 +2,15 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" + "context" "errors" "io" "os" "path/filepath" "strings" + "github.com/containerd/log" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // Errors used or returned by this file. @@ -107,7 +108,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er sourceDir, sourceBase := SplitPathDirEntry(sourcePath) opts := TarResourceRebaseOpts(sourceBase, rebaseName) - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir) return TarWithOptions(sourceDir, opts) } diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go index 2ac7729f..065bd4ad 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index 1a2fb971..318f5942 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -2,6 +2,7 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" + "context" "fmt" "io" "os" @@ -9,9 +10,9 @@ import ( "runtime" "strings" + "github.com/containerd/log" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be @@ -67,7 +68,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, // image but have it tagged as Windows inadvertently. 
if runtime.GOOS == "windows" { if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) continue } } @@ -92,7 +93,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } defer os.RemoveAll(aufsTempdir) } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options); err != nil { return 0, err } } @@ -183,7 +184,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, return 0, err } - if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil { + if err := createTarFile(path, dest, srcHdr, srcData, options); err != nil { return 0, err } diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go index d7f80644..7216f2f4 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive diff --git a/vendor/github.com/docker/docker/pkg/archive/path_unix.go b/vendor/github.com/docker/docker/pkg/archive/path_unix.go index 0b135aea..390264bf 100644 --- a/vendor/github.com/docker/docker/pkg/archive/path_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/path_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go index d0877968..14c4ceb1 100644 --- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 00000000..59068320 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir.go @@ -0,0 +1,44 @@ +package homedir + +import ( + "os" + "os/user" + "runtime" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on. +// +// Deprecated: this function is no longer used, and will be removed in the next release. +func Key() string { + return envKeyName +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// On non-Windows platforms, it falls back to nss lookups, if the home +// directory cannot be obtained from environment-variables. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + home, _ := os.UserHomeDir() + if home == "" && runtime.GOOS != "windows" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
+// +// Deprecated: this function is no longer used, and will be removed in the next release. +func GetShortcutString() string { + return homeShortCut +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index 11f1bec9..4eeb26b5 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package homedir // import "github.com/docker/docker/pkg/homedir" diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go index d1732dee..feae4d73 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -1,39 +1,8 @@ //go:build !windows -// +build !windows package homedir // import "github.com/docker/docker/pkg/homedir" -import ( - "os" - "os/user" +const ( + envKeyName = "HOME" + homeShortCut = "~" ) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. -func Get() string { - home := os.Getenv(Key()) - if home == "" { - if u, err := user.Current(); err == nil { - return u.HomeDir - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "~" -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go index 2f81813b..37f4ee67 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -1,24 +1,6 @@ package homedir // import "github.com/docker/docker/pkg/homedir" -import ( - "os" +const ( + envKeyName = "USERPROFILE" + homeShortCut = "%USERPROFILE%" // be careful while using in format functions ) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. 
-func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index a4001c3b..cd621bdc 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" @@ -11,15 +10,9 @@ import ( "os/exec" "path/filepath" "strconv" - "sync" "syscall" - "github.com/opencontainers/runc/libcontainer/user" -) - -var ( - entOnce sync.Once - getentCmd string + "github.com/moby/sys/user" ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { @@ -162,10 +155,10 @@ func getentGroup(name string) (user.Group, error) { } func callGetent(database, key string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("unable to find getent command") + getentCmd, err := resolveBinary("getent") + // if no `getent` command within the execution environment, can't do anything else + if err != nil { + return nil, fmt.Errorf("unable to find getent command: %w", err) } command := exec.Command(getentCmd, database, key) // we run getent within container filesystem, but without /dev so /dev/null is not available for exec to mock stdin diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go index 5e24577e..6a9311c4 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package idtools // import "github.com/docker/docker/pkg/idtools" diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go index 05cc6963..517a2f52 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index de00b95e..e03d3fee 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -3,11 +3,15 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "context" "io" + "runtime/debug" + "sync/atomic" // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. 
_ "crypto/sha256" _ "crypto/sha512" + + "github.com/containerd/log" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser @@ -16,10 +20,15 @@ import ( type ReadCloserWrapper struct { io.Reader closer func() error + closed atomic.Bool } // Close calls back the passed closer function func (r *ReadCloserWrapper) Close() error { + if !r.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("ReadCloserWrapper") + return nil + } return r.closer() } @@ -87,6 +96,7 @@ type cancelReadCloser struct { cancel func() pR *io.PipeReader // Stream to read from pW *io.PipeWriter + closed atomic.Bool } // NewCancelReadCloser creates a wrapper that closes the ReadCloser when the @@ -146,6 +156,17 @@ func (p *cancelReadCloser) closeWithError(err error) { // Close closes the wrapper its underlying reader. It will cause // future calls to Read to return io.EOF. func (p *cancelReadCloser) Close() error { + if !p.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("cancelReadCloser") + return nil + } p.closeWithError(io.EOF) return nil } + +func subsequentCloseWarn(name string) { + log.G(context.TODO()).Error("subsequent attempt to close " + name) + if log.GetLevel() >= log.DebugLevel { + log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack())) + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go b/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go deleted file mode 100644 index b3321602..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package ioutils - -import "github.com/docker/docker/pkg/longpath" - -// TempDir is the equivalent of [os.MkdirTemp], except that on Windows -// the result is in Windows longpath format. On Unix systems it is -// equivalent to [os.MkdirTemp]. -// -// Deprecated: use [longpath.MkdirTemp]. -var TempDir = longpath.MkdirTemp diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go index 61c67949..1f50602f 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -1,6 +1,9 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" -import "io" +import ( + "io" + "sync/atomic" +) // NopWriter represents a type which write operation is nop. type NopWriter struct{} @@ -29,9 +32,14 @@ func (f *NopFlusher) Flush() {} type writeCloserWrapper struct { io.Writer closer func() error + closed atomic.Bool } func (r *writeCloserWrapper) Close() error { + if !r.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("WriteCloserWrapper") + return nil + } return r.closer() } diff --git a/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go index ebfadd53..3d03441c 100644 --- a/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !windows -// +build !linux,!windows package meminfo diff --git a/vendor/github.com/docker/docker/pkg/process/doc.go b/vendor/github.com/docker/docker/pkg/process/doc.go deleted file mode 100644 index dae536d7..00000000 --- a/vendor/github.com/docker/docker/pkg/process/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package process provides a set of basic functions to manage individual -// processes. 
-package process diff --git a/vendor/github.com/docker/docker/pkg/process/process_unix.go b/vendor/github.com/docker/docker/pkg/process/process_unix.go deleted file mode 100644 index daf39236..00000000 --- a/vendor/github.com/docker/docker/pkg/process/process_unix.go +++ /dev/null @@ -1,82 +0,0 @@ -//go:build !windows -// +build !windows - -package process - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "runtime" - "strconv" - - "golang.org/x/sys/unix" -) - -// Alive returns true if process with a given pid is running. It only considers -// positive PIDs; 0 (all processes in the current process group), -1 (all processes -// with a PID larger than 1), and negative (-n, all processes in process group -// "n") values for pid are never considered to be alive. -func Alive(pid int) bool { - if pid < 1 { - return false - } - switch runtime.GOOS { - case "darwin": - // OS X does not have a proc filesystem. Use kill -0 pid to judge if the - // process exists. From KILL(2): https://www.freebsd.org/cgi/man.cgi?query=kill&sektion=2&manpath=OpenDarwin+7.2.1 - // - // Sig may be one of the signals specified in sigaction(2) or it may - // be 0, in which case error checking is performed but no signal is - // actually sent. This can be used to check the validity of pid. - err := unix.Kill(pid, 0) - - // Either the PID was found (no error) or we get an EPERM, which means - // the PID exists, but we don't have permissions to signal it. - return err == nil || err == unix.EPERM - default: - _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))) - return err == nil - } -} - -// Kill force-stops a process. It only considers positive PIDs; 0 (all processes -// in the current process group), -1 (all processes with a PID larger than 1), -// and negative (-n, all processes in process group "n") values for pid are -// ignored. Refer to [KILL(2)] for details. -// -// [KILL(2)]: https://man7.org/linux/man-pages/man2/kill.2.html -func Kill(pid int) error { - if pid < 1 { - return fmt.Errorf("invalid PID (%d): only positive PIDs are allowed", pid) - } - err := unix.Kill(pid, unix.SIGKILL) - if err != nil && err != unix.ESRCH { - return err - } - return nil -} - -// Zombie return true if process has a state with "Z". It only considers positive -// PIDs; 0 (all processes in the current process group), -1 (all processes with -// a PID larger than 1), and negative (-n, all processes in process group "n") -// values for pid are ignored. Refer to [PROC(5)] for details. -// -// [PROC(5)]: https://man7.org/linux/man-pages/man5/proc.5.html -func Zombie(pid int) (bool, error) { - if pid < 1 { - return false, nil - } - data, err := os.ReadFile(fmt.Sprintf("/proc/%d/stat", pid)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - if cols := bytes.SplitN(data, []byte(" "), 4); len(cols) >= 3 && string(cols[2]) == "Z" { - return true, nil - } - return false, nil -} diff --git a/vendor/github.com/docker/docker/pkg/process/process_windows.go b/vendor/github.com/docker/docker/pkg/process/process_windows.go deleted file mode 100644 index 26158d09..00000000 --- a/vendor/github.com/docker/docker/pkg/process/process_windows.go +++ /dev/null @@ -1,52 +0,0 @@ -package process - -import ( - "os" - - "golang.org/x/sys/windows" -) - -// Alive returns true if process with a given pid is running. 
-func Alive(pid int) bool { - h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid)) - if err != nil { - return false - } - var c uint32 - err = windows.GetExitCodeProcess(h, &c) - _ = windows.CloseHandle(h) - if err != nil { - // From the GetExitCodeProcess function (processthreadsapi.h) API docs: - // https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess - // - // The GetExitCodeProcess function returns a valid error code defined by the - // application only after the thread terminates. Therefore, an application should - // not use STILL_ACTIVE (259) as an error code (STILL_ACTIVE is a macro for - // STATUS_PENDING (minwinbase.h)). If a thread returns STILL_ACTIVE (259) as - // an error code, then applications that test for that value could interpret it - // to mean that the thread is still running, and continue to test for the - // completion of the thread after the thread has terminated, which could put - // the application into an infinite loop. - return c == uint32(windows.STATUS_PENDING) - } - return true -} - -// Kill force-stops a process. -func Kill(pid int) error { - p, err := os.FindProcess(pid) - if err == nil { - err = p.Kill() - if err != nil && err != os.ErrProcessDone { - return err - } - } - return nil -} - -// Zombie is not supported on Windows. -// -// TODO(thaJeztah): remove once we remove the stubs from pkg/system. -func Zombie(_ int) (bool, error) { - return false, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go index 84ae1570..92ff0209 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go index 2573d716..f4bbcce7 100644 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -1,13 +1,6 @@ package system // import "github.com/docker/docker/pkg/system" -import ( - "errors" -) +import "errors" -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") - - // ErrNotSupportedOperatingSystem means the operating system is not supported. - ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") -) +// ErrNotSupportedPlatform means the platform is not supported. 
+var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go index 38011294..f01f9385 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/image_os.go b/vendor/github.com/docker/docker/pkg/system/image_os.go deleted file mode 100644 index e3de86be..00000000 --- a/vendor/github.com/docker/docker/pkg/system/image_os.go +++ /dev/null @@ -1,10 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" -import ( - "runtime" - "strings" -) - -// IsOSSupported determines if an operating system is supported by the host. -func IsOSSupported(os string) bool { - return strings.EqualFold(runtime.GOOS, os) -} diff --git a/vendor/github.com/docker/docker/pkg/system/image_os_deprecated.go b/vendor/github.com/docker/docker/pkg/system/image_os_deprecated.go new file mode 100644 index 00000000..afb57dae --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/image_os_deprecated.go @@ -0,0 +1,19 @@ +package system + +import ( + "errors" + "runtime" + "strings" +) + +// ErrNotSupportedOperatingSystem means the operating system is not supported. +// +// Deprecated: use [github.com/docker/docker/image.CheckOS] and check the error returned. +var ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") + +// IsOSSupported determines if an operating system is supported by the host. +// +// Deprecated: use [github.com/docker/docker/image.CheckOS] and check the error returned. +func IsOSSupported(os string) bool { + return strings.EqualFold(runtime.GOOS, os) +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go index 3c2a43dd..7603efbb 100644 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -1,9 +1,7 @@ package system // import "github.com/docker/docker/pkg/system" -var ( - // containerdRuntimeSupported determines if containerd should be the runtime. - containerdRuntimeSupported = false -) +// containerdRuntimeSupported determines if containerd should be the runtime. +var containerdRuntimeSupported = false // InitContainerdRuntime sets whether to use containerd for runtime on Windows. func InitContainerdRuntime(cdPath string) { diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go index 654b9f2c..5e29a6b3 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_deprecated.go b/vendor/github.com/docker/docker/pkg/system/meminfo_deprecated.go deleted file mode 100644 index 21651992..00000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_deprecated.go +++ /dev/null @@ -1,16 +0,0 @@ -package system - -import "github.com/docker/docker/pkg/meminfo" - -// MemInfo contains memory statistics of the host system. -// -// Deprecated: use [meminfo.Memory]. 
-type MemInfo = meminfo.Memory - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -// -// Deprecated: use [meminfo.Read]. -func ReadMemInfo() (*meminfo.Memory, error) { - return meminfo.Read() -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go index d27152c0..2a62237a 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go index c890be11..e218e742 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go index 4586aad1..34df0b92 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go @@ -1,5 +1,4 @@ //go:build !freebsd && !windows -// +build !freebsd,!windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index ec89d7a1..00000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_deprecated.go b/vendor/github.com/docker/docker/pkg/system/path_deprecated.go deleted file mode 100644 index 5c95026c..00000000 --- a/vendor/github.com/docker/docker/pkg/system/path_deprecated.go +++ /dev/null @@ -1,18 +0,0 @@ -package system - -const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -// For Windows containers, an empty string is returned as the default -// path will be set by the container, and Docker has no context of what the -// default path should be. -// -// Deprecated: use oci.DefaultPathEnv -func DefaultPathEnv(os string) string { - if os == "windows" { - return "" - } - return defaultUnixPathEnv -} diff --git a/vendor/github.com/docker/docker/pkg/system/process_deprecated.go b/vendor/github.com/docker/docker/pkg/system/process_deprecated.go deleted file mode 100644 index 7b9f19ac..00000000 --- a/vendor/github.com/docker/docker/pkg/system/process_deprecated.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build linux || freebsd || darwin || windows -// +build linux freebsd darwin windows - -package system - -import "github.com/docker/docker/pkg/process" - -var ( - // IsProcessAlive returns true if process with a given pid is running. - // - // Deprecated: use [process.Alive]. 
- IsProcessAlive = process.Alive - - // IsProcessZombie return true if process has a state with "Z" - // - // Deprecated: use [process.Zombie]. - // - // TODO(thaJeztah): remove the Windows implementation in process once we remove this stub. - IsProcessZombie = process.Zombie -) - -// KillProcess force-stops a process. -// -// Deprecated: use [process.Kill]. -func KillProcess(pid int) { - _ = process.Kill(pid) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go index 8e61d820..435b776e 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || netbsd -// +build freebsd netbsd package system // import "github.com/docker/docker/pkg/system" @@ -7,10 +6,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go index c1c0ee9f..e0b629df 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 3ac02393..4309d42b 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -4,13 +4,15 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: s.Mode, uid: s.Uid, gid: s.Gid, // the type is 32bit on mips rdev: uint64(s.Rdev), //nolint: unconvert - mtim: s.Mtim}, nil + mtim: s.Mtim, + }, nil } // FromStatT converts a syscall.Stat_t type to a system.Stat_t type diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go index 756b92d1..851374e5 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil + mtim: s.Mtim, + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go index a45ffddf..205e5467 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import 
"github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go index 0ff3af2f..10876cd7 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -45,5 +45,6 @@ func fromStatT(fi *os.FileInfo) (*StatT, error) { return &StatT{ size: (*fi).Size(), mode: (*fi).Mode(), - mtim: (*fi).ModTime()}, nil + mtim: (*fi).ModTime(), + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go index 2768750a..f3a079f8 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go index bfed4af0..7c19d591 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs.go b/vendor/github.com/docker/docker/pkg/system/xattrs.go new file mode 100644 index 00000000..b3f4e8a2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs.go @@ -0,0 +1,18 @@ +package system // import "github.com/docker/docker/pkg/system" + +type XattrError struct { + Op string + Attr string + Path string + Err error +} + +func (e *XattrError) Error() string { return e.Op + " " + e.Attr + " " + e.Path + ": " + e.Err.Error() } + +func (e *XattrError) Unwrap() error { return e.Err } + +// Timeout reports whether this error represents a timeout. +func (e *XattrError) Timeout() bool { + t, ok := e.Err.(interface{ Timeout() bool }) + return ok && t.Timeout() +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go index 95b609fe..facfbb31 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -1,11 +1,17 @@ package system // import "github.com/docker/docker/pkg/system" -import "golang.org/x/sys/unix" +import ( + "golang.org/x/sys/unix" +) // Lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. // It will returns a nil slice and nil error if the xattr is not set. 
func Lgetxattr(path string, attr string) ([]byte, error) { + sysErr := func(err error) ([]byte, error) { + return nil, &XattrError{Op: "lgetxattr", Attr: attr, Path: path, Err: err} + } + // Start with a 128 length byte array dest := make([]byte, 128) sz, errno := unix.Lgetxattr(path, attr, dest) @@ -14,7 +20,7 @@ func Lgetxattr(path string, attr string) ([]byte, error) { // Buffer too small, use zero-sized buffer to get the actual size sz, errno = unix.Lgetxattr(path, attr, []byte{}) if errno != nil { - return nil, errno + return sysErr(errno) } dest = make([]byte, sz) sz, errno = unix.Lgetxattr(path, attr, dest) @@ -24,7 +30,7 @@ func Lgetxattr(path string, attr string) ([]byte, error) { case errno == unix.ENODATA: return nil, nil case errno != nil: - return nil, errno + return sysErr(errno) } return dest[:sz], nil @@ -33,5 +39,9 @@ func Lgetxattr(path string, attr string) ([]byte, error) { // Lsetxattr sets the value of the extended attribute identified by attr // and associated with the given path in the file system. func Lsetxattr(path string, attr string, data []byte, flags int) error { - return unix.Lsetxattr(path, attr, data, flags) + err := unix.Lsetxattr(path, attr, data, flags) + if err != nil { + return &XattrError{Op: "lsetxattr", Attr: attr, Path: path, Err: err} + } + return nil } diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go index b165a5db..2a3698f1 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index 296c96a6..4049d780 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -177,27 +177,27 @@ func ParsePortSpec(rawPort string) ([]PortMapping, error) { // Strip [] from IPV6 addresses rawIP, _, err := net.SplitHostPort(ip + ":") if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", ip, err) + return nil, fmt.Errorf("invalid IP address %v: %w", ip, err) } ip = rawIP } if ip != "" && net.ParseIP(ip) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", ip) + return nil, fmt.Errorf("invalid IP address: %s", ip) } if containerPort == "" { - return nil, fmt.Errorf("No port specified: %s", rawPort) + return nil, fmt.Errorf("no port specified: %s", rawPort) } startPort, endPort, err := ParsePortRange(containerPort) if err != nil { - return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + return nil, fmt.Errorf("invalid containerPort: %s", containerPort) } var startHostPort, endHostPort uint64 = 0, 0 if len(hostPort) > 0 { startHostPort, endHostPort, err = ParsePortRange(hostPort) if err != nil { - return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + return nil, fmt.Errorf("invalid hostPort: %s", hostPort) } } @@ -206,12 +206,12 @@ func ParsePortSpec(rawPort string) ([]PortMapping, error) { // In this case, use the host port range as the dynamic // host port range to allocate into. 
if endPort != startPort { - return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + return nil, fmt.Errorf("invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) } } if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("Invalid proto: %s", proto) + return nil, fmt.Errorf("invalid proto: %s", proto) } ports := []PortMapping{} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go index 892adf8c..e4b53e8a 100644 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -6,34 +6,10 @@ import ( "strings" ) -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -// DEPRECATED: do not use, this function may be removed in a future version -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - // ParsePortRange parses and validates the specified string as a port-range (8000-9000) func ParsePortRange(ports string) (uint64, uint64, error) { if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") + return 0, 0, fmt.Errorf("empty string specified for ports") } if !strings.Contains(ports, "-") { start, err := strconv.ParseUint(ports, 10, 16) @@ -51,7 +27,7 @@ func ParsePortRange(ports string) (uint64, uint64, error) { return 0, 0, err } if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + return 0, 0, fmt.Errorf("invalid range specified for port: %s", ports) } return start, end, nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go index 5b65c546..78a34a98 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -1,4 +1,4 @@ -// +build !windows +//go:build !windows package sockets @@ -15,7 +15,7 @@ const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) func configureUnixTransport(tr *http.Transport, proto, addr string) error { if len(addr) > maxUnixSocketPathSize { - return fmt.Errorf("Unix socket path %q is too long", addr) + return fmt.Errorf("unix socket path %q is too long", addr) } // No need for compression in local communications. tr.DisableCompression = true diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index e7591e6e..b9233521 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,9 +1,9 @@ -// +build !windows +//go:build !windows /* Package sockets is a simple unix domain socket wrapper. 
-Usage +# Usage For example: @@ -42,7 +42,7 @@ For example: if _, err := conn.Read(buf); err != nil { panic(err) } else if string(buf) != echoStr { - panic(fmt.Errorf("Msg may lost")) + panic(fmt.Errorf("msg may lost")) } } */ @@ -103,7 +103,7 @@ func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error // We don't use "defer" here, to reset the umask to its original value as soon // as possible. Ideally we'd be able to detect if WithChmod() was passed as // an option, and skip changing umask if default permissions are used. - origUmask := syscall.Umask(0777) + origUmask := syscall.Umask(0o777) l, err := net.Listen("unix", path) syscall.Umask(origUmask) if err != nil { @@ -122,5 +122,5 @@ func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error // NewUnixSocket creates a unix socket with the specified path and group. func NewUnixSocket(path string, gid int) (net.Listener, error) { - return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0660)) + return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0o660)) } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go similarity index 95% rename from vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go rename to vendor/github.com/docker/go-connections/tlsconfig/certpool.go index 1ca0965e..f84c624b 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go @@ -1,5 +1,3 @@ -// +build go1.7 - package tlsconfig import ( diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go deleted file mode 100644 index 1ff81c33..00000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.7 - -package tlsconfig - -import ( - "crypto/x509" -) - -// SystemCertPool returns an new empty cert pool, -// accessing system cert pool is supported in go 1.7 -func SystemCertPool() (*x509.CertPool, error) { - return x509.NewCertPool(), nil -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 99296837..606c98a3 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -1,6 +1,7 @@ // Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. // // As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// // A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. // A Config may be reused; the tls package will also not modify it. package tlsconfig @@ -9,11 +10,9 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" + "errors" "fmt" - "io/ioutil" "os" - - "github.com/pkg/errors" ) // Options represents the information needed to create client and server TLS configurations. @@ -36,7 +35,12 @@ type Options struct { ExclusiveRootPools bool MinVersion uint16 // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted + // if the key is encrypted. + // + // Deprecated: Use of encrypted TLS private keys has been deprecated, and + // will be removed in a future release. 
Golang has deprecated support for + // legacy PEM encryption (as specified in RFC 1423), as it is insecure by + // design (see https://go-review.googlesource.com/c/go/+/264159). Passphrase string } @@ -99,7 +103,7 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return nil, fmt.Errorf("failed to read system certificates: %v", err) } } - pemData, err := ioutil.ReadFile(caFile) + pemData, err := os.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } @@ -109,6 +113,15 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return certPool, nil } +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, + tls.VersionTLS13: {}, +} + // isValidMinVersion checks that the input value is a valid tls minimum version func isValidMinVersion(version uint16) bool { _, ok := allTLSVersions[version] @@ -120,10 +133,10 @@ func isValidMinVersion(version uint16) bool { func adjustMinVersion(options Options, config *tls.Config) error { if options.MinVersion > 0 { if !isValidMinVersion(options.MinVersion) { - return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) + return fmt.Errorf("invalid minimum TLS version: %x", options.MinVersion) } if options.MinVersion < config.MinVersion { - return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) + return fmt.Errorf("requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) } config.MinVersion = options.MinVersion } @@ -132,9 +145,14 @@ func adjustMinVersion(options Options, config *tls.Config) error { } // IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when trying to decrypt a TLS private key +// password when trying to decrypt a TLS private key. +// +// Deprecated: Use of encrypted TLS private keys has been deprecated, and +// will be removed in a future release. Golang has deprecated support for +// legacy PEM encryption (as specified in RFC 1423), as it is insecure by +// design (see https://go-review.googlesource.com/c/go/+/264159). func IsErrEncryptedKey(err error) bool { - return errors.Cause(err) == x509.IncorrectPasswordError + return errors.Is(err, x509.IncorrectPasswordError) } // getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. 
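Editor's aside on the `errors.Is` change above: with the move off `github.com/pkg/errors`, sentinel checks have to survive wrapping done via `fmt.Errorf`'s `%w` verb, which `errors.Is` handles and a plain equality check does not. A small hedged sketch (not from the vendored code):

```go
package main

import (
	"crypto/x509"
	"errors"
	"fmt"
)

func main() {
	// Wrap the sentinel the way getPrivateKey now does with %w.
	err := fmt.Errorf("private key is encrypted, but could not decrypt it: %w",
		x509.IncorrectPasswordError)

	// errors.Is unwraps the chain, so the sentinel is still found.
	fmt.Println(errors.Is(err, x509.IncorrectPasswordError)) // true

	// A direct comparison only sees the outer wrapper.
	fmt.Println(err == x509.IncorrectPasswordError) // false
}
```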
@@ -151,7 +169,7 @@ func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) if err != nil { - return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") + return nil, fmt.Errorf("private key is encrypted, but could not decrypt it: %w", err) } keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) } @@ -167,26 +185,24 @@ func getCert(options Options) ([]tls.Certificate, error) { return nil, nil } - errMessage := "Could not load X509 key pair" - - cert, err := ioutil.ReadFile(options.CertFile) + cert, err := os.ReadFile(options.CertFile) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } - prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + prKeyBytes, err := os.ReadFile(options.KeyFile) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } return []tls.Certificate{tlsCert}, nil @@ -206,7 +222,7 @@ func Client(options Options) (*tls.Config, error) { tlsCerts, err := getCert(options) if err != nil { - return nil, err + return nil, fmt.Errorf("could not load X509 key pair: %w", err) } tlsConfig.Certificates = tlsCerts @@ -224,9 +240,9 @@ func Server(options Options) (*tls.Config, error) { tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + return nil, fmt.Errorf("could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + return nil, fmt.Errorf("error reading X509 key pair - make sure the key is not encrypted (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go index 6b4c6a7c..a82f9fa5 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -1,7 +1,4 @@ -// +build go1.5 - // Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
-// package tlsconfig import ( diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go deleted file mode 100644 index ee22df47..00000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go deleted file mode 100644 index d8215f8e..00000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build go1.13 - -package tlsconfig - -import ( - "crypto/tls" -) - -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, - tls.VersionTLS13: {}, -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go deleted file mode 100644 index a5ba7f4a..00000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.13 - -package tlsconfig - -import ( - "crypto/tls" -) - -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 00000000..e028b46a --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile new file mode 100644 index 00000000..4e12afdd --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/Makefile @@ -0,0 +1,10 @@ +.PHONY: ci generate clean + +ci: clean generate + go test -race -v ./... + +generate: + go generate . + +clean: + rm -rf *_generated*.go diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md new file mode 100644 index 00000000..cf6b42f3 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/README.md @@ -0,0 +1,95 @@ +# httpsnoop + +Package httpsnoop provides an easy way to capture http related metrics (i.e. +response time, bytes written, and http status code) from your application's +http.Handlers. + +Doing this requires non-trivial wrapping of the http.ResponseWriter interface, +which is also exposed for users interested in a more low-level API. + +[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop) +[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml) + +## Usage Example + +```go +// myH is your app's http handler, perhaps a http.ServeMux or similar. +var myH http.Handler +// wrappedH wraps myH in order to log every request. +wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + m := httpsnoop.CaptureMetrics(myH, w, r) + log.Printf( + "%s %s (code=%d dt=%s written=%d)", + r.Method, + r.URL, + m.Code, + m.Duration, + m.Written, + ) +}) +http.ListenAndServe(":8080", wrappedH) +``` + +## Why this package exists + +Instrumenting an application's http.Handler is surprisingly difficult. + +However if you google for e.g. "capture ResponseWriter status code" you'll find +lots of advice and code examples that suggest it is a fairly trivial +undertaking. Unfortunately everything I've seen so far has a high chance of +breaking your application. + +The main problem is that a `http.ResponseWriter` often implements additional +interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and +`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter` +in your own struct that also implements the `http.ResponseWriter` interface +will hide the additional interfaces mentioned above. This has a high chance of +introducing subtle bugs into any non-trivial application. + +Another approach I've seen people take is to return a struct that implements +all of the interfaces above. However, that's also problematic, because it's +difficult to fake some of these interfaces' behaviors when the underlying +`http.ResponseWriter` doesn't have an implementation. It's also dangerous, +because an application may choose to operate differently, merely because it +detects the presence of these additional interfaces. + +This package solves this problem by checking which additional interfaces a +`http.ResponseWriter` implements, returning a wrapped version implementing the +exact same set of interfaces.
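To make the point concrete, the short sketch below (an illustrative snippet, not part of the vendored files) checks that a writer wrapped by `Wrap` keeps the optional interfaces of the original, and that `Unwrap` recovers the original writer:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/felixge/httpsnoop"
)

func main() {
	// httptest.ResponseRecorder implements http.Flusher in addition to
	// http.ResponseWriter, so the value returned by Wrap must as well.
	rec := httptest.NewRecorder()
	wrapped := httpsnoop.Wrap(rec, httpsnoop.Hooks{})

	_, isFlusher := wrapped.(http.Flusher)
	_, isHijacker := wrapped.(http.Hijacker)
	fmt.Println(isFlusher, isHijacker) // true false

	// Unwrap returns the original writer for further type assertions.
	fmt.Printf("%T\n", httpsnoop.Unwrap(wrapped)) // *httptest.ResponseRecorder
}
```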
+ +Additionally this package properly handles edge cases such as `WriteHeader` not +being called, or called more than once, as well as concurrent calls to +`http.ResponseWriter` methods, and even calls happening after the wrapped +`ServeHTTP` has already returned. + +Unfortunately this package is not perfect either. It's possible that it is +still missing some interfaces provided by the go core (let me know if you find +one), and it won't work for applications adding their own interfaces into the +mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying +`http.ResponseWriter` and type-assert the result to its other interfaces. + +However, hopefully the explanation above has sufficiently scared you away from rolling +your own solution to this problem. httpsnoop may still break your application, +but at least it tries to avoid it as much as possible. + +Anyway, the real problem here is that smuggling additional interfaces inside +`http.ResponseWriter` is a problematic design choice, but it probably goes as +deep as the Go language specification itself. But that's okay, I still prefer +Go over the alternatives ;). + +## Performance + +``` +BenchmarkBaseline-8 20000 94912 ns/op +BenchmarkCaptureMetrics-8 20000 95461 ns/op +``` + +As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an +overhead of ~500 ns per http request on my machine. However, the margin of +error appears to be larger than that, therefore it should be reasonable to +assume that the overhead introduced by `CaptureMetrics` is absolutely +negligible. + +## License + +MIT diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go new file mode 100644 index 00000000..bec7b71b --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go @@ -0,0 +1,86 @@ +package httpsnoop + +import ( + "io" + "net/http" + "time" +) + +// Metrics holds metrics captured from CaptureMetrics. +type Metrics struct { + // Code is the first http response code passed to the WriteHeader func of + // the ResponseWriter. If no such call is made, a default code of 200 is + // assumed instead. + Code int + // Duration is the time it took to execute the handler. + Duration time.Duration + // Written is the number of bytes successfully written by the Write or + // ReadFrom function of the ResponseWriter. ResponseWriters may also write + // data to their underlying connection directly (e.g. headers), but those + // are not tracked. Therefore the number of Written bytes will usually match + // the size of the response body. + Written int64 +} + +// CaptureMetrics wraps the given hnd, executes it with the given w and r, and +// returns the metrics it captured from it. +func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics { + return CaptureMetricsFn(w, func(ww http.ResponseWriter) { + hnd.ServeHTTP(ww, r) + }) +} + +// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the +// resulting metrics. This is very similar to CaptureMetrics (which is just +// sugar on top of this func), but is a more usable interface if your +// application doesn't use the Go http.Handler interface. +func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics { + m := Metrics{Code: http.StatusOK} + m.CaptureMetrics(w, fn) + return m +} + +// CaptureMetrics wraps w and calls fn with the wrapped w and updates +// Metrics m with the resulting metrics.
This is similar to CaptureMetricsFn, +// but allows one to customize starting Metrics object. +func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { + var ( + start = time.Now() + headerWritten bool + hooks = Hooks{ + WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { + return func(code int) { + next(code) + + if !(code >= 100 && code <= 199) && !headerWritten { + m.Code = code + headerWritten = true + } + } + }, + + Write: func(next WriteFunc) WriteFunc { + return func(p []byte) (int, error) { + n, err := next(p) + + m.Written += int64(n) + headerWritten = true + return n, err + } + }, + + ReadFrom: func(next ReadFromFunc) ReadFromFunc { + return func(src io.Reader) (int64, error) { + n, err := next(src) + + headerWritten = true + m.Written += n + return n, err + } + }, + } + ) + + fn(Wrap(w, hooks)) + m.Duration += time.Since(start) +} diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go new file mode 100644 index 00000000..203c35b3 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/docs.go @@ -0,0 +1,10 @@ +// Package httpsnoop provides an easy way to capture http related metrics (i.e. +// response time, bytes written, and http status code) from your application's +// http.Handlers. +// +// Doing this requires non-trivial wrapping of the http.ResponseWriter +// interface, which is also exposed for users interested in a more low-level +// API. +package httpsnoop + +//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 00000000..101cedde --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. 
Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. +func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + 
return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if 
w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts *http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 00000000..e0951df1 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md deleted file mode 100644 index 3305db0f..00000000 --- a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md +++ /dev/null @@ -1,10 +0,0 @@ -Serious about security -====================== - -Square recognizes the important contributions the security research community -can make. We therefore encourage reporting security issues with the code -contained in this repository. - -If you believe you have discovered a security vulnerability, please follow the -guidelines at . - diff --git a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md index 7820c2f4..7ae6cff9 100644 --- a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md @@ -1,6 +1,23 @@ +# v3.0.2 + +## Fixed + + - DecryptMulti: handle decompression error (#19) + +## Changed + + - jwe/CompactSerialize: improve performance (#67) + - Increase the default number of PBKDF2 iterations to 600k (#48) + - Return the proper algorithm for ECDSA keys (#45) + +## Added + + - Add Thumbprint support for opaque signers (#38) + # v3.0.1 -Fixed: +## Fixed + - Security issue: an attacker specifying a large "p2c" value can cause JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large amounts of CPU, causing a DoS. 
Thanks to Matt Schwager (@mschwager) for the diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md index b90c7e5c..57da6570 100644 --- a/vendor/github.com/go-jose/go-jose/v3/README.md +++ b/vendor/github.com/go-jose/go-jose/v3/README.md @@ -1,15 +1,18 @@ # Go JOSE -[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) -[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) -[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) -[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose) -[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt) +[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) +[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v3)](https://github.com/go-jose/go-jose/actions) Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. This includes support for JSON Web Encryption, JSON Web Signature, and JSON Web Token standards. +**Help Wanted!** If you'd like to help us develop this library please reach +out to css (at) css.bio. While I'm still working on keeping this maintained, +I have limited time for in-depth development and could use some additional help. + **Disclaimer**: This library contains encryption software that is subject to the U.S. Export Administration Regulations. You may not export, re-export, transfer or download this code or any part of it in violation of any United @@ -21,13 +24,13 @@ US maintained blocked list. ## Overview The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. +[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. Tables of supported algorithms are shown below. The library supports both the compact and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. It also comes with a small command-line utility -([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)) +([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util)) for dealing with JOSE messages in a shell. **Note**: We use a forked version of the `encoding/json` package from the Go @@ -38,29 +41,19 @@ libraries in other languages. 
### Versions -[Version 2](https://gopkg.in/go-jose/go-jose.v2) -([branch](https://github.com/go-jose/go-jose/tree/v2), -[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version: - - import "gopkg.in/go-jose/go-jose.v2" - [Version 3](https://github.com/go-jose/go-jose) -([branch](https://github.com/go-jose/go-jose/tree/master), -[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet): +([branch](https://github.com/go-jose/go-jose/tree/v3), +[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v3), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version: import "github.com/go-jose/go-jose/v3" -All new feature development takes place on the `master` branch, which we are -preparing to release as version 3 soon. Version 2 will continue to receive -critical bug and security fixes. Note that starting with version 3 we are -using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher. - -Version 1 (on the `v1` branch) is frozen and not supported anymore. +The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which +are still usable but not actively developed anymore. ### Supported algorithms See below for a table of supported algorithms. Algorithm identifiers match -the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518) standard where possible. The Godoc reference has a list of constants. Key encryption | Algorithm identifier(s) @@ -103,20 +96,20 @@ allows attaching a key id. Algorithm(s) | Corresponding types :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey) + RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) + ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) + EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) AES, HMAC | []byte 1. Only available in version 2 or later of the package ## Examples -[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) -[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt) Examples can be found in the Godoc reference for this package.
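For quick orientation alongside the Godoc examples, here is a small HS256 sign/verify round trip against the v3 API (a sketch under the APIs shown in this diff; the key bytes are made up for illustration):

```go
package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // illustrative HMAC key

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte("hello JOSE"))
	if err != nil {
		panic(err)
	}

	// Compact serialization produces the familiar three-part dotted token.
	serialized, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseSigned(serialized)
	if err != nil {
		panic(err)
	}

	// Verify checks the signature and returns the payload.
	payload, err := parsed.Verify(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // hello JOSE
}
```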
The -[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util) +[`jose-util`](https://github.com/go-jose/go-jose/tree/v3/jose-util) subdirectory also contains a small command-line utility which might be useful as an example as well. diff --git a/vendor/github.com/go-jose/go-jose/v3/SECURITY.md b/vendor/github.com/go-jose/go-jose/v3/SECURITY.md new file mode 100644 index 00000000..2f18a75a --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy +This document explains how to contact the Let's Encrypt security team to report security vulnerabilities. + +## Supported Versions +| Version | Supported | +| ------- | ----------| +| >= v3 | ✓ | +| v2 | ✗ | +| v1 | ✗ | + +## Reporting a vulnerability + +Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email. diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go index 78abc326..d4d4961b 100644 --- a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go +++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go @@ -285,6 +285,9 @@ func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm switch alg { case RS256, RS384, RS512: + // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the + // random parameter is legacy and ignored, and it can be nil. + // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) case PS256, PS384, PS512: out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go index 6901137e..506d3b7b 100644 --- a/vendor/github.com/go-jose/go-jose/v3/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go @@ -21,7 +21,6 @@ import ( "crypto/rsa" "errors" "fmt" - "reflect" "github.com/go-jose/go-jose/v3/json" ) @@ -76,14 +75,24 @@ type recipientKeyInfo struct { type EncrypterOptions struct { Compression CompressionAlgorithm - // Optional map of additional keys to be inserted into the protected header - // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. + // Optional map of name/value pairs to be inserted into the protected + // header of a JWS object. Some specifications which make use of + // JWS require additional values here. + // + // Values will be serialized by [json.Marshal] and must be valid inputs to + // that function. + // + // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal ExtraHeaders map[HeaderKey]interface{} } // WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. +// if necessary, and returns the updated EncrypterOptions. +// +// The v parameter will be serialized by [json.Marshal] and must be a valid +// input to that function. 
+// +// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { if eo.ExtraHeaders == nil { eo.ExtraHeaders = map[HeaderKey]interface{}{} @@ -111,7 +120,17 @@ func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { // default of 100000 will be used for the count and a 128-bit random salt will // be generated. type Recipient struct { - Algorithm KeyAlgorithm + Algorithm KeyAlgorithm + // Key must have one of these types: + // - ed25519.PublicKey + // - *ecdsa.PublicKey + // - *rsa.PublicKey + // - *JSONWebKey + // - JSONWebKey + // - []byte (a symmetric key) + // - Any type that satisfies the OpaqueKeyEncrypter interface + // + // The type of Key must match the value of Algorithm. Key interface{} KeyID string PBES2Count int @@ -150,16 +169,17 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) switch rcpt.Algorithm { case DIRECT: // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + keyBytes, ok := rawKey.([]byte) + if !ok { return nil, ErrUnsupportedKeyType } - if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + if encrypter.cipher.keySize() != len(keyBytes) { return nil, ErrInvalidKeySize } encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), + key: keyBytes, } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes) recipientInfo.keyID = keyID if rcpt.KeyID != "" { recipientInfo.keyID = rcpt.KeyID @@ -168,16 +188,16 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) return encrypter, nil case ECDH_ES: // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + keyDSA, ok := rawKey.(*ecdsa.PublicKey) + if !ok { return nil, ErrUnsupportedKeyType } encrypter.keyGenerator = ecKeyGenerator{ size: encrypter.cipher.keySize(), algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), + publicKey: keyDSA, } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA) recipientInfo.keyID = keyID if rcpt.KeyID != "" { recipientInfo.keyID = rcpt.KeyID @@ -270,9 +290,8 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey recipient, err := makeJWERecipient(alg, encryptionKey.Key) recipient.keyID = encryptionKey.KeyID return recipient, err - } - if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { - return newOpaqueKeyEncrypter(alg, encrypter) + case OpaqueKeyEncrypter: + return newOpaqueKeyEncrypter(alg, encryptionKey) } return recipientKeyInfo{}, ErrUnsupportedKeyType } @@ -300,11 +319,11 @@ func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { return newDecrypter(decryptionKey.Key) case *JSONWebKey: return newDecrypter(decryptionKey.Key) + case OpaqueKeyDecrypter: + return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil + default: + return nil, ErrUnsupportedKeyType } - if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { - return &opaqueKeyDecrypter{decrypter: okd}, nil - } - return nil, ErrUnsupportedKeyType } // Implementation of encrypt method producing a JWE object. @@ -403,9 +422,24 @@ func (ctx *genericEncrypter) Options() EncrypterOptions { } } -// Decrypt and validate the object and return the plaintext. 
Note that this -// function does not support multi-recipient, if you desire multi-recipient +// Decrypt and validate the object and return the plaintext. This +// function does not support multi-recipient. If you desire multi-recipient // decryption use DecryptMulti instead. +// +// The decryptionKey argument must contain a private or symmetric key +// and must have one of these types: +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - *JSONWebKeySet +// - JSONWebKeySet +// - []byte (a symmetric key) +// - string (a symmetric key) +// - Any type that satisfies the OpaqueKeyDecrypter interface. +// +// Note that ed25519 is only available for signatures, not encryption, so is +// not an option here. func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { headers := obj.mergedHeaders(nil) @@ -462,15 +496,21 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) // The "zip" header parameter may only be present in the protected header. if comp := obj.protected.getCompression(); comp != "" { plaintext, err = decompress(comp, plaintext) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } } - return plaintext, err + return plaintext, nil } // DecryptMulti decrypts and validates the object and returns the plaintexts, // with support for multiple recipients. It returns the index of the recipient // for which the decryption was successful, the merged headers for that recipient, // and the plaintext. +// +// The decryptionKey argument must have one of the types allowed for the +// decryptionKey argument of Decrypt(). func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { globalHeaders := obj.mergedHeaders(nil) @@ -532,7 +572,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade // The "zip" header parameter may only be present in the protected header. if comp := obj.protected.getCompression(); comp != "" { - plaintext, _ = decompress(comp, plaintext) + plaintext, err = decompress(comp, plaintext) + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } } sanitized, err := headers.sanitized() diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go index 71ec1c41..0ad40ca0 100644 --- a/vendor/github.com/go-jose/go-jose/v3/doc.go +++ b/vendor/github.com/go-jose/go-jose/v3/doc.go @@ -15,13 +15,11 @@ */ /* - Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. It implements encryption and signing based on the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web Token support available in a sub-package. The library supports both the compact and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. 
- */ package jose diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go index 968a4249..62f8b8ad 100644 --- a/vendor/github.com/go-jose/go-jose/v3/encoding.go +++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go @@ -189,3 +189,36 @@ func base64URLDecode(value string) ([]byte, error) { value = strings.TrimRight(value, "=") return base64.RawURLEncoding.DecodeString(value) } + +func base64EncodeLen(sl []byte) int { + return base64.RawURLEncoding.EncodedLen(len(sl)) +} + +func base64JoinWithDots(inputs ...[]byte) string { + if len(inputs) == 0 { + return "" + } + + // Count of dots. + totalCount := len(inputs) - 1 + + for _, input := range inputs { + totalCount += base64EncodeLen(input) + } + + out := make([]byte, totalCount) + startEncode := 0 + for i, input := range inputs { + base64.RawURLEncoding.Encode(out[startEncode:], input) + + if i == len(inputs)-1 { + continue + } + + startEncode += base64EncodeLen(input) + out[startEncode] = '.' + startEncode++ + } + + return string(out) +} diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go index 4dbc4146..50634dd8 100644 --- a/vendor/github.com/go-jose/go-jose/v3/json/decode.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/decode.go @@ -75,14 +75,13 @@ import ( // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// “not present,” unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. -// func Unmarshal(data []byte, v interface{}) error { // Check for well-formedness. // Avoids filling out half a data structure diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go index ea0a1361..98de68ce 100644 --- a/vendor/github.com/go-jose/go-jose/v3/json/encode.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go @@ -58,6 +58,7 @@ import ( // becomes a member of the object unless // - the field's tag is "-", or // - the field is empty and its tag specifies the "omitempty" option. +// // The empty values are false, 0, any // nil pointer or interface value, and any array, slice, map, or string of // length zero. The object's default key string is the struct field name @@ -65,28 +66,28 @@ import ( // the struct field's tag value is the key name, followed by an optional comma // and options. Examples: // -// // Field is ignored by this package. -// Field int `json:"-"` +// // Field is ignored by this package. +// Field int `json:"-"` // -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` // -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. 
+// Field int `json:"myName,omitempty"` // -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used // when communicating with JavaScript programs: // -// Int64String int64 `json:",string"` +// Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, dollar signs, percent signs, hyphens, @@ -133,7 +134,6 @@ import ( // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an infinite recursion. -// func Marshal(v interface{}) ([]byte, error) { e := &encodeState{} err := e.marshal(v) diff --git a/vendor/github.com/go-jose/go-jose/v3/json/stream.go b/vendor/github.com/go-jose/go-jose/v3/json/stream.go index 9b2b926b..f03b171e 100644 --- a/vendor/github.com/go-jose/go-jose/v3/json/stream.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/stream.go @@ -240,7 +240,6 @@ var _ Unmarshaler = (*RawMessage)(nil) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// type Token interface{} const ( diff --git a/vendor/github.com/go-jose/go-jose/v3/jwe.go b/vendor/github.com/go-jose/go-jose/v3/jwe.go index bce30450..4267ac75 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v3/jwe.go @@ -252,13 +252,13 @@ func (obj JSONWebEncryption) CompactSerialize() (string, error) { serializedProtected := mustSerializeJSON(obj.protected) - return fmt.Sprintf( - "%s.%s.%s.%s.%s", - base64.RawURLEncoding.EncodeToString(serializedProtected), - base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey), - base64.RawURLEncoding.EncodeToString(obj.iv), - base64.RawURLEncoding.EncodeToString(obj.ciphertext), - base64.RawURLEncoding.EncodeToString(obj.tag)), nil + return base64JoinWithDots( + serializedProtected, + obj.recipients[0].encryptedKey, + obj.iv, + obj.ciphertext, + obj.tag, + ), nil } // FullSerialize serializes an object using the full JSON serialization format. diff --git a/vendor/github.com/go-jose/go-jose/v3/jwk.go b/vendor/github.com/go-jose/go-jose/v3/jwk.go index 78ff5aca..e4021959 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v3/jwk.go @@ -67,9 +67,21 @@ type rawJSONWebKey struct { X5tSHA256 string `json:"x5t#S256,omitempty"` } -// JSONWebKey represents a public or private key in JWK format. +// JSONWebKey represents a public or private key in JWK format. It can be +// marshaled into JSON and unmarshaled from JSON. type JSONWebKey struct { - // Cryptographic key, can be a symmetric or asymmetric key. + // Key is the Go in-memory representation of this key. 
It must have one + // of these types: + // - ed25519.PublicKey + // - ed25519.PrivateKey + // - *ecdsa.PublicKey + // - *ecdsa.PrivateKey + // - *rsa.PublicKey + // - *rsa.PrivateKey + // - []byte (a symmetric key) + // + // When marshaling this JSONWebKey into JSON, the "kty" header parameter + // will be automatically set based on the type of this field. Key interface{} // Key identifier, parsed from `kid` header. KeyID string @@ -389,6 +401,8 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { input, err = rsaThumbprintInput(key.N, key.E) case ed25519.PrivateKey: input, err = edThumbprintInput(ed25519.PublicKey(key[32:])) + case OpaqueSigner: + return key.Public().Thumbprint(hash) default: return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key)) } diff --git a/vendor/github.com/go-jose/go-jose/v3/jws.go b/vendor/github.com/go-jose/go-jose/v3/jws.go index 865f16ad..e37007db 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jws.go +++ b/vendor/github.com/go-jose/go-jose/v3/jws.go @@ -314,15 +314,18 @@ func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) { return "", ErrNotSupported } - serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected)) - payload := "" - signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature) + serializedProtected := mustSerializeJSON(obj.Signatures[0].protected) + var payload []byte if !detached { - payload = base64.RawURLEncoding.EncodeToString(obj.payload) + payload = obj.payload } - return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil + return base64JoinWithDots( + serializedProtected, + payload, + obj.Signatures[0].Signature, + ), nil } // CompactSerialize serializes an object using the compact serialization format. diff --git a/vendor/github.com/go-jose/go-jose/v3/opaque.go b/vendor/github.com/go-jose/go-jose/v3/opaque.go index fc3e8d2e..68db085e 100644 --- a/vendor/github.com/go-jose/go-jose/v3/opaque.go +++ b/vendor/github.com/go-jose/go-jose/v3/opaque.go @@ -121,7 +121,7 @@ func (oke *opaqueKeyEncrypter) encryptKey(cek []byte, alg KeyAlgorithm) (recipie return oke.encrypter.encryptKey(cek, alg) } -//OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key. +// OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key. type OpaqueKeyDecrypter interface { DecryptKey(encryptedKey []byte, header Header) ([]byte, error) } diff --git a/vendor/github.com/go-jose/go-jose/v3/shared.go b/vendor/github.com/go-jose/go-jose/v3/shared.go index fc2505e0..489a04e3 100644 --- a/vendor/github.com/go-jose/go-jose/v3/shared.go +++ b/vendor/github.com/go-jose/go-jose/v3/shared.go @@ -183,8 +183,13 @@ type Header struct { // Unverified certificate chain parsed from x5c header. certificates []*x509.Certificate - // Any headers not recognised above get unmarshalled - // from JSON in a generic manner and placed in this map. + // At parse time, each header parameter with a name other than "kid", + // "jwk", "alg", "nonce", or "x5c" will have its value passed to + // [json.Unmarshal] to unmarshal it into an interface value. + // The resulting value will be stored in this map, with the header + // parameter name as the key. 
+ // + // [json.Unmarshal]: https://pkg.go.dev/encoding/json#Unmarshal ExtraHeaders map[HeaderKey]interface{} } diff --git a/vendor/github.com/go-jose/go-jose/v3/signing.go b/vendor/github.com/go-jose/go-jose/v3/signing.go index 81d55f58..52f3d856 100644 --- a/vendor/github.com/go-jose/go-jose/v3/signing.go +++ b/vendor/github.com/go-jose/go-jose/v3/signing.go @@ -40,6 +40,15 @@ type Signer interface { } // SigningKey represents an algorithm/key used to sign a message. +// +// Key must have one of these types: +// - ed25519.PrivateKey +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - []byte (an HMAC key) +// - Any type that satisfies the OpaqueSigner interface type SigningKey struct { Algorithm SignatureAlgorithm Key interface{} @@ -52,12 +61,22 @@ type SignerOptions struct { // Optional map of additional keys to be inserted into the protected header // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. + // additional values here. + // + // Values will be serialized by [json.Marshal] and must be valid inputs to + // that function. + // + // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal ExtraHeaders map[HeaderKey]interface{} } // WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. +// if necessary, and returns the updated SignerOptions. +// +// The v argument will be serialized by [json.Marshal] and must be a valid +// input to that function. +// +// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions { if so.ExtraHeaders == nil { so.ExtraHeaders = map[HeaderKey]interface{}{} @@ -173,11 +192,11 @@ func newVerifier(verificationKey interface{}) (payloadVerifier, error) { return newVerifier(verificationKey.Key) case *JSONWebKey: return newVerifier(verificationKey.Key) + case OpaqueVerifier: + return &opaqueVerifier{verifier: verificationKey}, nil + default: + return nil, ErrUnsupportedKeyType } - if ov, ok := verificationKey.(OpaqueVerifier); ok { - return &opaqueVerifier{verifier: ov}, nil - } - return nil, ErrUnsupportedKeyType } func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error { @@ -204,11 +223,11 @@ func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipient return newJWKSigner(alg, signingKey) case *JSONWebKey: return newJWKSigner(alg, *signingKey) + case OpaqueSigner: + return newOpaqueSigner(alg, signingKey) + default: + return recipientSigInfo{}, ErrUnsupportedKeyType } - if signer, ok := signingKey.(OpaqueSigner); ok { - return newOpaqueSigner(alg, signer) - } - return recipientSigInfo{}, ErrUnsupportedKeyType } func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) { @@ -321,12 +340,21 @@ func (ctx *genericSigner) Options() SignerOptions { } // Verify validates the signature on the object and returns the payload. -// This function does not support multi-signature, if you desire multi-sig +// This function does not support multi-signature. If you desire multi-signature // verification use VerifyMulti instead. // // Be careful when verifying signatures based on embedded JWKs inside the // payload header. You cannot assume that the key received in a payload is // trusted. 
+// +// The verificationKey argument must have one of these types: +// - ed25519.PublicKey +// - *ecdsa.PublicKey +// - *rsa.PublicKey +// - *JSONWebKey +// - JSONWebKey +// - []byte (an HMAC key) +// - Any type that implements the OpaqueVerifier interface. func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) { err := obj.DetachedVerify(obj.payload, verificationKey) if err != nil { @@ -346,6 +374,9 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte { // most cases, you will probably want to use Verify instead. DetachedVerify // is only useful if you have a payload and signature that are separated from // each other. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error { key := tryJWKS(verificationKey, obj.headers()...) verifier, err := newVerifier(key) @@ -388,6 +419,9 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter // returns the index of the signature that was verified, along with the signature // object and the payload. We return the signature and index to guarantee that // callers are getting the verified value. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) { idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey) if err != nil { @@ -405,6 +439,9 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa // DetachedVerifyMulti is only useful if you have a payload and signature that are // separated from each other, and the signature can have multiple signers at the // same time. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) { key := tryJWKS(verificationKey, obj.headers()...) 
verifier, err := newVerifier(key) diff --git a/vendor/github.com/go-jose/go-jose/v3/symmetric.go b/vendor/github.com/go-jose/go-jose/v3/symmetric.go index 1ffd2708..10d8e19f 100644 --- a/vendor/github.com/go-jose/go-jose/v3/symmetric.go +++ b/vendor/github.com/go-jose/go-jose/v3/symmetric.go @@ -40,12 +40,17 @@ var RandReader = rand.Reader const ( // RFC7518 recommends a minimum of 1,000 iterations: - // https://tools.ietf.org/html/rfc7518#section-4.8.1.2 + // - https://tools.ietf.org/html/rfc7518#section-4.8.1.2 + // // NIST recommends a minimum of 10,000: - // https://pages.nist.gov/800-63-3/sp800-63b.html - // 1Password uses 100,000: - // https://support.1password.com/pbkdf2/ - defaultP2C = 100000 + // - https://pages.nist.gov/800-63-3/sp800-63b.html + // + // 1Password increased in 2023 from 100,000 to 650,000: + // - https://support.1password.com/pbkdf2/ + // + // OWASP recommended 600,000 in Dec 2022: + // - https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 + defaultP2C = 600000 // Default salt size: 128 bits defaultP2SSize = 16 ) diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/go-logr/stdr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md new file mode 100644 index 00000000..51586678 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/README.md @@ -0,0 +1,6 @@ +# Minimal Go logging using logr and Go's standard library + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) + +This package implements the [logr interface](https://github.com/go-logr/logr) +in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go new file mode 100644 index 00000000..93a8aab5 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/stdr.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package stdr implements github.com/go-logr/logr.Logger in terms of +// Go's standard log package. +package stdr + +import ( + "log" + "os" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" +) + +// The global verbosity level. See SetVerbosity(). +var globalVerbosity int + +// SetVerbosity sets the global level against which all info logs will be +// compared. If this is greater than or equal to the "V" of the logger, the +// message will be logged. A higher value here means more logs will be written. +// The previous verbosity value is returned. This is not concurrent-safe - +// callers must be sure to call it from only one goroutine. +func SetVerbosity(v int) int { + old := globalVerbosity + globalVerbosity = v + return old +} + +// New returns a logr.Logger which is implemented by Go's standard log package, +// or something like it. If std is nil, this will use a default logger +// instead. +// +// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +func New(std StdLogger) logr.Logger { + return NewWithOptions(std, Options{}) +} + +// NewWithOptions returns a logr.Logger which is implemented by Go's standard +// log package, or something like it. See New for details. +func NewWithOptions(std StdLogger, opts Options) logr.Logger { + if std == nil { + // Go's log.Default() is only available in 1.16 and higher. + std = log.New(os.Stderr, "", log.LstdFlags) + } + + if opts.Depth < 0 { + opts.Depth = 0 + } + + fopts := funcr.Options{ + LogCaller: funcr.MessageClass(opts.LogCaller), + } + + sl := &logger{ + Formatter: funcr.NewFormatter(fopts), + std: std, + } + + // For skipping our own logger.Info/Error. + sl.Formatter.AddCallDepth(1 + opts.Depth) + + return logr.New(sl) +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // Depth biases the assumed number of call frames to the "true" caller. + // This is useful when the calling code calls a function which then calls + // stdr (e.g. a logging shim to another API). Values less than zero will + // be treated as zero. + Depth int + + // LogCaller tells stdr to add a "caller" key to some or all log lines. + // Go's log package has options to log this natively, too. + LogCaller MessageClass + + // TODO: add an option to log the date/time +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// StdLogger is the subset of the Go stdlib log.Logger API that is needed for +// this adapter. +type StdLogger interface { + // Output is the same as log.Output and log.Logger.Output. 
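A rough usage sketch of the adapter defined above; *log.Logger satisfies StdLogger through its Output method, and the verbosity gate is the global value set by SetVerbosity:

```go
package main

import (
	"errors"
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	// V(level) messages are emitted only while level <= the global verbosity.
	stdr.SetVerbosity(1)

	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
	logger = logger.WithName("example").WithValues("component", "demo")

	logger.Info("starting", "answer", 42)
	logger.V(1).Info("verbose detail") // enabled because verbosity is 1
	logger.Error(errors.New("boom"), "something failed")
}
```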
+ Output(calldepth int, logline string) error +} + +type logger struct { + funcr.Formatter + std StdLogger +} + +var _ logr.LogSink = &logger{} +var _ logr.CallDepthLogSink = &logger{} + +func (l logger) Enabled(level int) bool { + return globalVerbosity >= level +} + +func (l logger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l logger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l logger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +// Underlier exposes access to the underlying logging implementation. Since +// callers only have a logr.Logger, they have to know which implementation is +// in use, so this interface is less of an abstraction and more of way to test +// type conversion. +type Underlier interface { + GetUnderlying() StdLogger +} + +// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger +// is itself an interface, the result may or may not be a Go log.Logger. +func (l logger) GetUnderlying() StdLogger { + return l.std +} diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml index 4e1fc0c7..cf88ead3 100644 --- a/vendor/github.com/go-openapi/errors/.golangci.yml +++ b/vendor/github.com/go-openapi/errors/.golangci.yml @@ -4,45 +4,59 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 + linters: enable-all: true disable: + - errname # this repo doesn't follow the convention advised by this linter - maligned + - unparam - lll + - gochecknoinits - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint + - gofumpt - paralleltest - tparallel - - cyclop - - errname - - varnamelen + - thelper + - ifshort - exhaustruct - - maintidx + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md index 4aac049e..6d57ea55 100644 --- a/vendor/github.com/go-openapi/errors/README.md +++ b/vendor/github.com/go-openapi/errors/README.md @@ -1,11 +1,8 @@ -# OpenAPI errors +# OpenAPI errors [![Build Status](https://github.com/go-openapi/errors/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") 
[![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) -[![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) -[![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/errors.svg)](https://pkg.go.dev/github.com/go-openapi/errors) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/errors.svg)](https://golangci.com) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go index c13f3435..5320cb96 100644 --- a/vendor/github.com/go-openapi/errors/api.go +++ b/vendor/github.com/go-openapi/errors/api.go @@ -55,9 +55,15 @@ func (a apiError) MarshalJSON() ([]byte, error) { // New creates a new API error with a code and a message func New(code int32, message string, args ...interface{}) Error { if len(args) > 0 { - return &apiError{code, fmt.Sprintf(message, args...)} + return &apiError{ + code: code, + message: fmt.Sprintf(message, args...), + } + } + return &apiError{ + code: code, + message: message, } - return &apiError{code, message} } // NotFound creates a new not found error @@ -130,10 +136,14 @@ func flattenComposite(errs *CompositeError) *CompositeError { // MethodNotAllowed creates a new method not allowed error func MethodNotAllowed(requested string, allow []string) Error { msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) - return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg} + return &MethodNotAllowedError{ + code: http.StatusMethodNotAllowed, + Allowed: allow, + message: msg, + } } -// ServeError the error handler interface implementation +// ServeError implements the http error handler interface func ServeError(rw http.ResponseWriter, r *http.Request, err error) { rw.Header().Set("Content-Type", "application/json") switch e := err.(type) { diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go index da5f6c78..cf7ac2ed 100644 --- a/vendor/github.com/go-openapi/errors/schema.go +++ b/vendor/github.com/go-openapi/errors/schema.go @@ -120,6 +120,10 @@ func (c *CompositeError) Error() string { return c.message } +func (c *CompositeError) Unwrap() []error { + return c.Errors +} + // MarshalJSON implements the JSON encoding interface func (c CompositeError) MarshalJSON() ([]byte, error) { return json.Marshal(map[string]interface{}{ @@ -133,7 +137,7 @@ func (c CompositeError) MarshalJSON() ([]byte, error) { func CompositeValidationError(errors ...error) *CompositeError { return &CompositeError{ code: CompositeErrorCode, - Errors: append([]error{}, errors...), + Errors: append(make([]error, 0, len(errors)), errors...), message: "validation failure list", } } diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml 
b/vendor/github.com/go-openapi/strfmt/.golangci.yml index be4899cb..22f8d21c 100644 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -4,56 +4,58 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 31 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 linters: - enable: - - revive - - goimports - - gosec + enable-all: true + disable: + - maligned - unparam - - unconvert - - predeclared - - prealloc - - misspell - - # disable: - # - maligned - # - lll - # - gochecknoinits - # - gochecknoglobals - # - godox - # - gocognit - # - whitespace - # - wsl - # - funlen - # - wrapcheck - # - testpackage - # - nlreturn - # - gofumpt - # - goerr113 - # - gci - # - gomnd - # - godot - # - exhaustivestruct - # - paralleltest - # - varnamelen - # - ireturn - # - exhaustruct - # #- thelper - -issues: - exclude-rules: - - path: bson.go - text: "should be .*ObjectID" - linters: - - golint - - stylecheck - + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md index 0cf89d77..f6b39c6c 100644 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ b/vendor/github.com/go-openapi/strfmt/README.md @@ -1,8 +1,7 @@ -# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - +# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) This package exposes a registry of data types to support string formats in the go-openapi toolkit. 
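The CompositeError.Unwrap addition above is what lets the standard errors package see through composite validation errors. A small sketch of the effect, relying on Go 1.20+ multi-error unwrapping (the error messages are illustrative):

```go
package main

import (
	"errors"
	"fmt"

	apierrors "github.com/go-openapi/errors"
)

func main() {
	sentinel := errors.New("name is required")
	composite := apierrors.CompositeValidationError(sentinel, errors.New("age must be positive"))

	// Unwrap() []error exposes the wrapped slice, so errors.Is can now
	// match individual validation failures inside the composite.
	fmt.Println(errors.Is(composite, sentinel)) // true
}
```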
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go index a8a3604a..cfa9a526 100644 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -39,10 +39,10 @@ func IsBSONObjectID(str string) bool { // ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) // // swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID //nolint:revive +type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck // NewObjectId creates a ObjectId from a Hex String -func NewObjectId(hex string) ObjectId { //nolint:revive +func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck oid, err := bsonprim.ObjectIDFromHex(hex) if err != nil { panic(err) @@ -135,7 +135,7 @@ func (id *ObjectId) UnmarshalBSON(data []byte) error { // BSON document if the error is nil. func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { oid := bsonprim.ObjectID(id) - return bsontype.ObjectID, oid[:], nil + return bson.TypeObjectID, oid[:], nil } // UnmarshalBSONValue is an interface implemented by types that can unmarshal a diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go index a89a4de3..28137140 100644 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -25,6 +25,7 @@ import ( "strings" "github.com/asaskevich/govalidator" + "github.com/google/uuid" "go.mongodb.org/mongo-driver/bson" ) @@ -57,24 +58,35 @@ const ( // - long top-level domain names (e.g. example.london) are permitted // - symbol unicode points are permitted (e.g. emoji) (not for top-level domain) HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$` + + // json null type + jsonNull = "null" +) + +const ( // UUIDPattern Regex for UUID that allows uppercase - UUIDPattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$` + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)` + // UUID3Pattern Regex for UUID3 that allows uppercase - UUID3Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$` + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)` + // UUID4Pattern Regex for UUID4 that allows uppercase - UUID4Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$` + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)` + // UUID5Pattern Regex for UUID5 that allows uppercase - UUID5Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$` - // json null type - jsonNull = "null" + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. 
+	UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)`
 )
 
 var (
 	rxHostname = regexp.MustCompile(HostnamePattern)
-	rxUUID     = regexp.MustCompile(UUIDPattern)
-	rxUUID3    = regexp.MustCompile(UUID3Pattern)
-	rxUUID4    = regexp.MustCompile(UUID4Pattern)
-	rxUUID5    = regexp.MustCompile(UUID5Pattern)
 )
 
 // IsHostname returns true when the string is a valid hostname
@@ -99,24 +111,28 @@ func IsHostname(str string) bool {
 	return valid
 }
 
-// IsUUID returns true is the string matches a UUID, upper case is allowed
+// IsUUID returns true if the string matches a UUID (in any version, including v6 and v7); upper case is allowed
 func IsUUID(str string) bool {
-	return rxUUID.MatchString(str)
+	_, err := uuid.Parse(str)
+	return err == nil
 }
 
-// IsUUID3 returns true is the string matches a UUID, upper case is allowed
+// IsUUID3 returns true if the string matches a UUID v3; upper case is allowed
 func IsUUID3(str string) bool {
-	return rxUUID3.MatchString(str)
+	id, err := uuid.Parse(str)
+	return err == nil && id.Version() == uuid.Version(3)
}
 
-// IsUUID4 returns true is the string matches a UUID, upper case is allowed
+// IsUUID4 returns true if the string matches a UUID v4; upper case is allowed
 func IsUUID4(str string) bool {
-	return rxUUID4.MatchString(str)
+	id, err := uuid.Parse(str)
+	return err == nil && id.Version() == uuid.Version(4)
 }
 
-// IsUUID5 returns true is the string matches a UUID, upper case is allowed
+// IsUUID5 returns true if the string matches a UUID v5; upper case is allowed
 func IsUUID5(str string) bool {
-	return rxUUID5.MatchString(str)
+	id, err := uuid.Parse(str)
+	return err == nil && id.Version() == uuid.Version(5)
 }
 
 // IsEmail validates an email address.
diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go
index ad3b3c35..888e107c 100644
--- a/vendor/github.com/go-openapi/strfmt/format.go
+++ b/vendor/github.com/go-openapi/strfmt/format.go
@@ -16,6 +16,7 @@ package strfmt
 
 import (
 	"encoding"
+	stderrors "errors"
 	"fmt"
 	"reflect"
 	"strings"
@@ -94,7 +95,7 @@ func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry {
 }
 
 // MapStructureHookFunc is a decode hook function for mapstructure
-func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //nolint:gocyclo,cyclop
+func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
 	return func(from reflect.Type, to reflect.Type, obj interface{}) (interface{}, error) {
 		if from.Kind() != reflect.String {
 			return obj, nil
@@ -117,7 +118,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //
 		case "datetime":
 			input := data
 			if len(input) == 0 {
-				return nil, fmt.Errorf("empty string is an invalid datetime format")
+				return nil, stderrors.New("empty string is an invalid datetime format")
 			}
 			return ParseDateTime(input)
 		case "duration":
diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go
index 9bef4c3b..f08ba4da 100644
--- a/vendor/github.com/go-openapi/strfmt/time.go
+++ b/vendor/github.com/go-openapi/strfmt/time.go
@@ -76,6 +76,8 @@ const (
 	ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04"
 	// ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern.
ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05" + // short form of ISO8601TimeUniversalSortableDateTimePattern + ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02" // DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6 DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$` ) @@ -84,7 +86,7 @@ var ( rxDateTime = regexp.MustCompile(DateTimePattern) // DateTimeFormats is the collection of formats used by ParseDateTime() - DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern} + DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm} // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds) MarshalFormat = RFC3339Millis @@ -245,7 +247,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { buf := make([]byte, 8) binary.LittleEndian.PutUint64(buf, uint64(i64)) - return bsontype.DateTime, buf, nil + return bson.TypeDateTime, buf, nil } // UnmarshalBSONValue is an interface implemented by types that can unmarshal a @@ -253,7 +255,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. 
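Taken together, the strfmt changes above are visible to callers in two ways: UUID checks now parse with github.com/google/uuid instead of matching regexes, and ParseDateTime accepts the new date-only short form. A quick sketch of the new behavior (the UUID literals are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// uuid.Parse accepts any RFC 4122 version, so v7 values now pass IsUUID.
	fmt.Println(strfmt.IsUUID("018f6f5e-8c6f-7cc5-9d2a-0aa3f1f0460b")) // true

	// Version-specific checks inspect the parsed version nibble.
	fmt.Println(strfmt.IsUUID4("123e4567-e89b-12d3-a456-426614174000")) // false: v1 layout

	// The short form lets a bare date parse as a DateTime.
	dt, err := strfmt.ParseDateTime("2024-03-01")
	fmt.Println(dt, err)
}
```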
func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { - if tpe == bsontype.Null { + if tpe == bson.TypeNull { *t = DateTime{} return nil } diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore index d69b53ac..c4b1b64f 100644 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -2,3 +2,4 @@ secrets.yml vendor Godeps .idea +*.out diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index bf503e40..80e2be00 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -4,14 +4,14 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 25 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 3 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -20,35 +20,41 @@ linters: - lll - gochecknoinits - gochecknoglobals - - nlreturn - - testpackage + - funlen + - godox + - gocognit + - whitespace + - wsl - wrapcheck + - testpackage + - nlreturn - gomnd - - exhaustive - exhaustivestruct - goerr113 - - wsl - - whitespace - - gofumpt - - godot + - errorlint - nestif - - godox - - funlen - - gci - - gocognit + - godot + - gofumpt - paralleltest + - tparallel - thelper - ifshort - - gomoddirectives - - cyclop - - forcetypeassert - - ireturn - - tagliatelle - - varnamelen - - goimports - - tenv - - golint - exhaustruct - - nilnil + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 00000000..e7f28ed6 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. 
+ +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index 217f6fa5..a7292229 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,7 +1,8 @@ -# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) Contains a bunch of helper functions for go-openapi and go-swagger projects. @@ -18,4 +19,5 @@ You may also use it standalone for your projects. This repo has only few dependencies outside of the standard library: -* YAML utilities depend on gopkg.in/yaml.v2 +* YAML utilities depend on `gopkg.in/yaml.v3` +* `github.com/mailru/easyjson v0.7.7` diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go new file mode 100644 index 00000000..20a359bb --- /dev/null +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "sort" + "strings" + "sync" +) + +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. + commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. 
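The rebuilt index keeps the package's public surface unchanged; a small sketch of how it is exercised through swag's name-mangling helpers (the outputs shown as comments are what the default initialism table should produce):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// "HTTP" and "ID" are in the default table, so they are kept
	// upper-cased as whole words when a Go name is assembled.
	fmt.Println(swag.ToGoName("http server id")) // HTTPServerID

	// AddInitialisms rebuilds the sorted index and its pre-baked rune
	// copies, so later calls recognize the new word.
	swag.AddInitialisms("GRPC")
	fmt.Println(swag.ToGoName("find grpc endpoint")) // FindGRPCEndpoint
}
```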
+type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, _ interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byInitialism(result))) + return +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 00038c37..783442fd 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -21,6 +21,7 @@ import ( "net/http" "net/url" "os" + "path" "path/filepath" "runtime" "strings" @@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = "" var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +func LoadFromFileOrHTTP(pth string) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) } -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { +// LoadStrategy returns a loader function for a given path or URI. +// +// The load strategy returns the remote load for any path starting with `http`. +// So this works for any URI with a scheme `http` or `https`. +// +// The fallback strategy is to call the local loader. +// +// The local loader takes a local file system path (absolute or relative) as argument, +// or alternatively a `file://...` URI, **without host** (see also below for windows). +// +// There are a few liberalities, initially intended to be tolerant regarding the URI syntax, +// especially on windows. +// +// Before the local loader is called, the given path is transformed: +// - percent-encoded characters are unescaped +// - simple paths (e.g. 
`./folder/file`) are passed as-is
+//   - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such as `folder/file` works too.
+//
+// For paths provided as URIs with the "file" scheme, please note that:
+//   - `file://` is simply stripped.
+//     This means that the host part of the URI is not parsed at all.
+//     For example, `file:///folder/file` becomes `/folder/file`,
+//     but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
+//     Similarly, `file://./folder/file` yields `./folder/file`.
+//   - on windows, `file://...` can take a host so as to specify a UNC share location.
+//
+// Reminder about windows-specifics:
+//   - `file://host/folder/file` becomes a UNC path like `\\host\folder\file` (no port specification is supported)
+//   - `file:///c:/folder/file` becomes `C:\folder\file`
+//   - `file://c:/folder/file` is tolerated (without the leading `/`) and becomes `c:\folder\file`
+func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+	if strings.HasPrefix(pth, "http") {
 		return remote
 	}
-	return func(pth string) ([]byte, error) {
-		upth, err := pathUnescape(pth)
+
+	return func(p string) ([]byte, error) {
+		upth, err := url.PathUnescape(p)
 		if err != nil {
 			return nil, err
 		}
-		if strings.HasPrefix(pth, `file://`) {
-			if runtime.GOOS == "windows" {
-				// support for canonical file URIs on windows.
-				// Zero tolerance here for dodgy URIs.
-				u, _ := url.Parse(upth)
-				if u.Host != "" {
-					// assume UNC name (volume share)
-					// file://host/share/folder\... ==> \\host\share\path\folder
-					// NOTE: UNC port not yet supported
-					upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
-				} else {
-					// file:///c:/folder/... ==> just remove the leading slash
-					upth = strings.TrimPrefix(upth, `file:///`)
-				}
-			} else {
-				upth = strings.TrimPrefix(upth, `file://`)
+		if !strings.HasPrefix(p, `file://`) {
+			// regular file path provided: just normalize slashes
+			return local(filepath.FromSlash(upth))
+		}
+
+		if runtime.GOOS != "windows" {
+			// crude processing: this leaves full URIs with a host with a (mostly) unexpected result
+			upth = strings.TrimPrefix(upth, `file://`)
+
+			return local(filepath.FromSlash(upth))
+		}
+
+		// windows-only pre-processing of file://... URIs
+
+		// support for canonical file URIs on windows.
+		u, err := url.Parse(filepath.ToSlash(upth))
+		if err != nil {
+			return nil, err
+		}
+
+		if u.Host != "" {
+			// assume UNC name (volume share)
+			// NOTE: UNC port not yet supported
+
+			// when the "host" segment is a drive letter:
+			// file://C:/folder/... => C:\folder
+			upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
+			if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
+				// tolerance: if we have a leading dot, this can't be a host
+				// file://host/share/folder\... ==> \\host\share\path\folder
+				upth = "//" + upth
+			}
+		} else {
+			// no host, let's figure out if this is a drive letter
+			upth = strings.TrimPrefix(upth, `file://`)
+			first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
+			if strings.HasSuffix(first, ":") {
+				// drive letter in the first segment:
+				// file:///c:/folder/...
==> strip the leading slash + upth = strings.TrimPrefix(upth, `/`) } } diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9b..8bb64ac3 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string + kind lexemKind } +) - casualNameLexem struct { - original string - } +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go deleted file mode 100644 index 7c7da9c0..00000000 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.9 -// +build go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. 
-// Since go1.9, this may be implemented with sync.Map. -type indexOfInitialisms struct { - sortMutex *sync.Mutex - index *sync.Map -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - sortMutex: new(sync.Mutex), - index: new(sync.Map), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - for k, v := range initial { - m.index.Store(k, v) - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - _, ok := m.index.Load(key) - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.index.Store(key, true) - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - m.index.Range(func(key, value interface{}) bool { - k := key.(string) - result = append(result, k) - return true - }) - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go deleted file mode 100644 index 0565db37..00000000 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Before go1.9, this may be implemented with a mutex on the map. 
-type indexOfInitialisms struct { - getMutex *sync.Mutex - index map[string]bool -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - getMutex: new(sync.Mutex), - index: make(map[string]bool, 50), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k, v := range initial { - m.index[k] = v - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - m.getMutex.Lock() - defer m.getMutex.Unlock() - _, ok := m.index[key] - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - m.index[key] = true - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k := range m.index { - result = append(result, k) - } - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index a1825fb7..274727a8 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. + + matchesPool struct { + *sync.Pool } - splitterOption func(*splitter) *splitter + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } ) -// split calls the splitter; splitter provides more control and post options +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. 
+// +// Use newSplitter for more control and options func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) - for _, lexem := range lexems { + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter + return s } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { +func withPostSplitInitialismCheck(s *splitter) { s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) } - initialismMatches []*initialismMatch -) -func (s *splitter) toNameLexems(name string) []nameLexem { + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). 
+ newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } } + + match.complete = true + match.end = currentRunePosition } - match.complete = true - match.end = currentRunePosition + *newMatches = append(*newMatches, match) } - - newMatches = append(newMatches, match) } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) 
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) - } - - if lastAcceptedMatch.end+1 != len(nameRunes) { + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) + s.appendBrokenDownCasualString(nameLexems, rest) } - return nameLexems -} + poolOfMatches.RedeemMatches(matches) -func (s *splitter) initialismRuneEqual(a, b rune) bool { - return a == b + return nameLexems } -func (s *splitter) breakInitialism(original string) nameLexem { +func (s splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s *splitter) breakCasualString(str []rune) []nameLexem { - segments := make([]nameLexem, 0) - currentSegment := "" +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() addCasualNameLexem := func(original string) { - segments = append(segments, newCasualNameLexem(original)) + *segments = append(*segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - segments = append(segments, newInitialismNameLexem(original, match)) + *segments = append(*segments, newInitialismNameLexem(original, match)) } - addNameLexem := func(original string) { - if s.postSplitInitialismCheck { - for _, initialism := range s.initialisms { - if upper(initialism) == upper(original) { - addInitialismNameLexem(original, initialism) + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + return } } - } - addCasualNameLexem(original) + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem } - for _, rn := range string(str) { - if replace, found := nameReplaceTable[rn]; found { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } if replace != "" { @@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem { } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if currentSegment != "" { - 
addNameLexem(currentSegment) - currentSegment = "" + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } continue } if unicode.IsUpper(rn) { - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - currentSegment = "" + currentSegment.Reset() } - currentSegment += string(rn) + currentSegment.WriteRune(rn) + } + + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. +func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } - if currentSegment != "" { - addNameLexem(currentSegment) + i += size } - return segments + return true } diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 00000000..c52d6bf7 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,22 @@ +package swag + +import "unsafe" + +type internalString struct { + Data unsafe.Pointer + Len int +} + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + p := (*internalString)(unsafe.Pointer(&str)).Data + return unsafe.Slice((*byte)(p), len(str)) +} + +/* + * go1.20 version (for when go mod moves to a go1.20 requirement): + +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} +*/ diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index d971fbe3..5051401c 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. 
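
The new `string_bytes.go` above gives swag a zero-copy view of a string's bytes for the initialism comparison. Below is a minimal standalone sketch of the same trick, assuming go1.20+ so that `unsafe.StringData` is available (the vendored file keeps the pre-go1.20 form until the module requires go1.20, as its own comment notes); the helper name here is illustrative, and the returned slice must be treated as read-only since it aliases the string's backing array.

```go
package main

import (
	"fmt"
	"unsafe"
)

// stringBytes mirrors the go1.20 variant noted in the vendored file:
// it views a string's backing array as a []byte without copying.
// Writing to the result would corrupt the (immutable) string, so
// callers must treat it as read-only.
func stringBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	s := "HTTP Server"
	b := stringBytes(s)
	fmt.Println(len(b), string(b[:4])) // 11 HTTP, with no copy of the data
}
```
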
-var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { - return strings.Trim(str, " ") + return strings.TrimSpace(str) } // Shortcut to strings.ToUpper() @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,33 +162,40 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { - out = append(out, w.GetOriginal()) + out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // 
ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(in)) - for _, w := range in { - original := w.GetOriginal() + out := make([]string, 0, len(*in)) + for _, w := range *in { + original := trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) } else { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -264,7 +209,7 @@ func ToJSONName(name string) string { out = append(out, lower(w)) continue } - out = append(out, Camelize(w)) + out = append(out, Camelize(trim(w))) } return strings.Join(out, "") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } - result := "" - for _, lexem := range lexems { + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -343,7 +323,7 @@ type zeroable interface { func IsZero(data interface{}) bool { v := reflect.ValueOf(data) // check for nil data - switch v.Kind() { + switch v.Kind() { 
//nolint:exhaustive case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: if v.IsNil() { return true @@ -356,7 +336,7 @@ func IsZero(data interface{}) bool { } // continue with slightly more complex reflection - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.String: return v.Len() == 0 case reflect.Bool: @@ -376,16 +356,6 @@ func IsZero(data interface{}) bool { } } -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() -} - // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription string diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index f09ee609..f59e0259 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -16,8 +16,11 @@ package swag import ( "encoding/json" + "errors" "fmt" "path/filepath" + "reflect" + "sort" "strconv" "github.com/mailru/easyjson/jlexer" @@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, fmt.Errorf("only YAML documents that are objects are supported") + return nil, errors.New("only YAML documents that are objects are supported") } return &document, nil } @@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlTimestamp: return node.Value, nil case yamlNull: - return nil, nil + return nil, nil //nolint:nilnil default: return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) } @@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } +func isNil(input interface{}) bool { + if input == nil { + return true + } + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func json2yaml(item interface{}) (*yaml.Node, error) { + if isNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + switch val := item.(type) { case JSONMapSlice: var n yaml.Node @@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) { case map[string]interface{}: var n yaml.Node n.Kind = yaml.MappingNode - for k, v := range val { + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] childNode, err := json2yaml(v) if err != nil { return nil, err @@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlBoolScalar, Value: strconv.FormatBool(val), }, nil + default: + return nil, fmt.Errorf("unhandled type: %T", val) } - return nil, nil } // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index 7ed347d3..7ec5ac7e 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant 
([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + ## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4be..dc60082d 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cd..c3511292 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index dc75f7d9..5232b486 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -186,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. 
+func Validate(s string) error {
+	switch len(s) {
+	// Standard UUID format
+	case 36:
+
+	// UUID with "urn:uuid:" prefix
+	case 36 + 9:
+		if !strings.EqualFold(s[:9], "urn:uuid:") {
+			return fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// UUID enclosed in braces
+	case 36 + 2:
+		if s[0] != '{' || s[len(s)-1] != '}' {
+			return fmt.Errorf("invalid bracketed UUID format")
+		}
+		s = s[1 : len(s)-1]
+
+	// UUID without hyphens
+	case 32:
+		for i := 0; i < len(s); i += 2 {
+			_, ok := xtob(s[i], s[i+1])
+			if !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+
+	default:
+		return invalidLengthError{len(s)}
+	}
+
+	// Check for standard UUID format
+	if len(s) == 36 {
+		if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+			return errors.New("invalid UUID format")
+		}
+		for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+			if _, ok := xtob(s[x], s[x+1]); !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+	}
+
+	return nil
+}
+
 // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 // , or "" if uuid is invalid.
 func (uuid UUID) String() string {
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 00000000..339a959a
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 falls back to a random NodeID automatically. If the clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewV6 returns Nil and an error.
+func NewV6() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	/*
+	    0                   1                   2                   3
+	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                           time_high                           |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |           time_mid            |      time_low_and_version     |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |clk_seq_hi_res |  clk_seq_low  |         node (0-1)            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                         node (2-5)                            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+
+	binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	uuid[6] = 0x60 | (uuid[6] & 0x0F)
+	uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 00000000..3167b643
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded,
+// as well as improved entropy characteristics over versions 1 or 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID versions 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+	uuid, err := NewRandom()
+	if err != nil {
+		return uuid, err
+	}
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+	uuid, err := NewRandomFromReader(r)
+	if err != nil {
+		return uuid, err
+	}
+
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets version b0111 (uuid[6]).
+// uuid[8] already has the right variant bits (10) set.
+// See functions NewV7 and NewV7FromReader.
+func makeV7(uuid []byte) {
+	/*
+		 0                   1                   2                   3
+		 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                           unix_ts_ms                          |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|          unix_ts_ms           | ver |  rand_a (12 bit seq)    |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|var|                        rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                            rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+	_ = uuid[15] // bounds check
+
+	t, s := getV7Time()
+
+	uuid[0] = byte(t >> 40)
+	uuid[1] = byte(t >> 32)
+	uuid[2] = byte(t >> 24)
+	uuid[3] = byte(t >> 16)
+	uuid[4] = byte(t >> 8)
+	uuid[5] = byte(t)
+
+	uuid[6] = 0x70 | (0x0F & byte(s>>8))
+	uuid[7] = byte(s)
+}
+
+// lastV7time is the last time we returned, stored as:
+//
+//	52 bits of time in milliseconds since epoch
+//	12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+	timeMu.Lock()
+	defer timeMu.Unlock()
+
+	nano := timeNow().UnixNano()
+	milli = nano / nanoPerMilli
+	// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+	seq = (nano - milli*nanoPerMilli) >> 8
+	now := milli<<12 + seq
+	if now <= lastV7time {
+		now = lastV7time + 1
+		milli = now >> 12
+		seq = now & 0xfff
+	}
+	lastV7time = now
+	return milli, seq
+}
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index 4c28dff4..a2295380 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -3,7 +3,6 @@
 before:
   hooks:
     - ./gen.sh
-    - go install mvdan.cc/garble@v0.10.1
 
 builds:
   -
@@ -32,7 +31,6 @@ builds:
       - mips64le
     goarm:
       - 7
-    gobinary: garble
   - id: "s2d"
     binary: s2d
@@ -59,7 +57,6 @@ builds:
      - mips64le
    goarm:
      - 7
-    gobinary: garble
  - id: "s2sx"
    binary: s2sx
@@ -87,7 +84,6 @@ builds:
      - mips64le
    goarm:
      - 7
-    gobinary: garble
archives:
  -
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 7e83f583..1f72cdde 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,30 @@ This package provides various compression algorithms.
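
The github.com/google/uuid additions above round out v1.6.0: `Validate`, the `Max` constant, and the version 6/7 constructors. A hedged usage sketch against that API (my example, not part of the vendored code); the monotonicity of consecutive `NewV7` results within a process follows from `getV7Time` above, which makes the string forms of successive v7 UUIDs sort in creation order.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Validate checks the format without allocating a UUID value (added in v1.5.0).
	if err := uuid.Validate("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		panic(err)
	}

	// Max is the all-ones UUID added in v1.6.0.
	fmt.Println(uuid.Max) // ffffffff-ffff-ffff-ffff-ffffffffffff

	// Version 7 UUIDs embed a millisecond timestamp plus a 12-bit sequence,
	// so consecutive values from one process are strictly increasing.
	a, _ := uuid.NewV7()
	b, _ := uuid.NewV7()
	fmt.Println(a.String() < b.String()) // true

	// Version 6 keeps the UUIDv1 fields, reordered for database locality;
	// its timestamp is recoverable via the extended Time() method.
	v6, err := uuid.NewV6()
	if err != nil {
		panic(err)
	}
	fmt.Println(v6.Version(), v6.Time())
}
```
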
# changelog +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + * Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 @@ -554,7 +578,7 @@ For direct deflate use, NewStatelessWriter and StatelessDeflate are available. S A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: -``` +```go // replace 'ioutil.Discard' with your output. gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) if err != nil { diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index de912e18..66d1657d 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -212,7 +212,7 @@ func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { // Should only be used after a start/reset. func (d *compressor) fillWindow(b []byte) { // Do not fill window if we are in store-only or huffman mode. - if d.level <= 0 { + if d.level <= 0 && d.level > -MinCustomWindowSize { return } if d.fast != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go deleted file mode 100644 index 4dcab8d2..00000000 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - v3 := int32(b.b[b.off+3]) - v2 := int32(b.b[b.off+2]) - v1 := int32(b.b[b.off+1]) - v0 := int32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - v3 := uint32(b.b[b.off+3]) - v2 := uint32(b.b[b.off+2]) - v1 := uint32(b.b[b.off+1]) - v0 := uint32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 518436cf..84aa3d12 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -350,6 +350,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { // Does not update s.clearCount. func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. for _, v := range in { s.count[v]++ } @@ -415,7 +416,7 @@ func (s *Scratch) validateTable(c cTable) bool { // minTableLog provides the minimum logSize to safely represent a distribution. func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 if minBitsSrc < minBitsSymbols { return uint8(minBitsSrc) @@ -427,7 +428,7 @@ func (s *Scratch) minTableLog() uint8 { func (s *Scratch) optimalTableLog() { tableLog := s.TableLog minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 if maxBitsSrc < tableLog { // Accuracy can be reduced tableLog = maxBitsSrc diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index e8ad17ad..77ecd68e 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -88,7 +88,7 @@ type Scratch struct { // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. MaxDecodedSize int - br byteReader + srcLen int // MaxSymbolValue will override the maximum symbol value of the next block. 
MaxSymbolValue uint8 @@ -170,7 +170,7 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) { if s.fse == nil { s.fse = &fse.Scratch{} } - s.br.init(in) + s.srcLen = len(in) return s, nil } diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod index 2263853f..5a4412f9 100644 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,4 @@ module github.com/klauspost/compress -go 1.16 +go 1.19 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index bdd49c8b..92e2347b 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -259,7 +259,7 @@ nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ## Decompressor -Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), kindly supplied by [fuzzit.dev](https://fuzzit.dev/). diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index f6a24097..6a5a2988 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -95,42 +95,54 @@ type Header struct { // If there isn't enough input, io.ErrUnexpectedEOF is returned. // The FirstBlock.OK will indicate if enough information was available to decode the first block header. func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { *h = Header{} if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 b, in := in[:4], in[4:] if string(b) != frameMagic { if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch + return nil, ErrMagicMismatch } if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 h.Skippable = true h.SkippableID = int(b[0] & 0xf) h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil + return in[4:], nil } // Read Window_Descriptor // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } fhd, in := in[0], in[1:] h.HeaderSize++ h.SingleSegment = fhd&(1<<5) != 0 h.HasCheckSum = fhd&(1<<2) != 0 if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") + return nil, errors.New("reserved bit set on frame header") } if !h.SingleSegment { if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } var wd byte wd, in = in[0], in[1:] @@ -148,7 +160,7 @@ func (h *Header) Decode(in []byte) error { size = 4 } if len(in) < int(size) { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:size], in[size:] h.HeaderSize += int(size) @@ -178,7 +190,7 @@ func (h *Header) Decode(in []byte) error { if fcsSize > 0 { h.HasFCS = true if len(in) < fcsSize { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) @@ -199,7 +211,7 @@ func (h *Header) Decode(in []byte) error { // Frame Header done, we will not fail from now on. if len(in) < 3 { - return nil + return in, nil } tmp := in[:3] bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) @@ -209,7 +221,7 @@ func (h *Header) Decode(in []byte) error { cSize := int(bh >> 3) switch blockType { case blockTypeReserved: - return nil + return in, nil case blockTypeRLE: h.FirstBlock.Compressed = true h.FirstBlock.DecompressedSize = cSize @@ -225,5 +237,25 @@ func (h *Header) Decode(in []byte) error { } h.FirstBlock.OK = true - return nil + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) 
+ f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c81a1535..87f42879 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -201,14 +201,6 @@ encodeLoop: if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { return } - if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) - } - if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { - panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } // Try to quick reject if we already have a long match. if m.length > 16 { left := len(src) - int(m.s+m.length) @@ -227,8 +219,10 @@ encodeLoop: } } l := 4 + e.matchlen(s+4, offset+4, src) - if true { + if m.rep <= 0 { // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. tMin := s - e.maxMatchOff if tMin < 0 { tMin = 0 @@ -239,7 +233,14 @@ encodeLoop: l++ } } - + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } cand := match{offset: offset, s: s, length: l, rep: rep} cand.estBits(bitsPerByte) if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { @@ -336,24 +337,31 @@ encodeLoop: } if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) } } // We have a match, we can store the forward value + s = best.s if best.rep > 0 { var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - if debugAsserts && s < nextEmit { - panic("s < nextEmit") - } addLiterals(&seq, best.s) // Repeat. If bit 4 is set, this is a non-lit repeat. seq.offset = uint32(best.rep & 3) if debugSequences { - println("repeat sequence", seq, "next s:", s) + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) } blk.sequences = append(blk.sequences, seq) @@ -396,7 +404,6 @@ encodeLoop: // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. 
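
The `DecodeAndStrip`/`AppendTo` pair added to the zstd `Header` above makes frame headers round-trippable: one parses a header and hands back the bytes that follow it, the other re-encodes the parsed fields. A small usage sketch against the vendored API as shown (my example, not from the diff); the skippable-frame bytes in `main` are hand-built purely for illustration.

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// inspectFrame decodes the frame header at the start of compressed and
// returns the payload that follows it.
func inspectFrame(compressed []byte) ([]byte, error) {
	var h zstd.Header
	remain, err := h.DecodeAndStrip(compressed)
	if err != nil {
		return nil, err
	}
	fmt.Printf("skippable=%v window=%d checksum=%v\n", h.Skippable, h.WindowSize, h.HasCheckSum)

	// AppendTo re-encodes the parsed header; handy when rewriting or splicing frames.
	reencoded, err := h.AppendTo(nil)
	if err != nil {
		return nil, err
	}
	fmt.Printf("re-encoded header: % x\n", reencoded)

	return remain, nil
}

func main() {
	// A minimal skippable frame: magic 0x184D2A50 (low nibble = skippable ID),
	// little-endian payload size 3, then the 3-byte payload.
	frame := []byte{0x50, 0x2a, 0x4d, 0x18, 0x03, 0x00, 0x00, 0x00, 0xaa, 0xbb, 0xcc}
	remain, err := inspectFrame(frame)
	if err != nil {
		panic(err)
	}
	fmt.Printf("payload: % x\n", remain)
}
```
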
- s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index faaf8192..20671dcb 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -94,7 +94,7 @@ func WithEncoderConcurrency(n int) EOption { // The value must be a power of two between MinWindowSize and MaxWindowSize. // A larger value will enable better compression but allocate more memory and, // for above-default values, take considerably longer. -// The default value is determined by the compression level. +// The default value is determined by the compression level and max 8MB. func WithWindowSize(n int) EOption { return func(o *encoderOptions) error { switch { @@ -232,9 +232,9 @@ func WithEncoderLevel(l EncoderLevel) EOption { case SpeedDefault: o.windowSize = 8 << 20 case SpeedBetterCompression: - o.windowSize = 16 << 20 + o.windowSize = 8 << 20 case SpeedBestCompression: - o.windowSize = 32 << 20 + o.windowSize = 8 << 20 } } if !o.customALEntropy { diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go index 2f5d5ed4..667ca067 100644 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -76,7 +76,7 @@ func (f frameHeader) appendTo(dst []byte) []byte { if f.SingleSegment { dst = append(dst, uint8(f.ContentSize)) } - // Unless SingleSegment is set, framessizes < 256 are nto stored. + // Unless SingleSegment is set, framessizes < 256 are not stored. case 1: f.ContentSize -= 256 dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 332e51fe..8adfebb0 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -20,10 +20,9 @@ func (s *fseDecoder) buildDtable() error { if v == -1 { s.dt[highThreshold].setAddBits(uint8(i)) highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) + v = 1 } + symbolNext[i] = uint16(v) } } @@ -35,10 +34,12 @@ func (s *fseDecoder) buildDtable() error { for ss, v := range s.norm[:s.symbolLen] { for i := 0; i < int(v); i++ { s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { + for { // lowprob area position = (position + step) & tableMask + if position <= highThreshold { + break + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 974b9972..5b06174b 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -157,8 +157,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -177,8 +176,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -197,8 +195,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX 
R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -459,8 +456,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -479,8 +475,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -499,8 +494,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -772,11 +766,10 @@ sequenceDecs_decode_bmi2_fill_2_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -784,11 +777,10 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -796,10 +788,9 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1032,11 +1023,10 @@ sequenceDecs_decode_56_bmi2_fill_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -1044,11 +1034,10 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -1056,10 +1045,9 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1967,8 +1955,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -1987,8 +1974,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2007,8 +1993,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2514,11 +2499,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -2526,11 +2510,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 
// Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -2538,10 +2521,9 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -3055,8 +3037,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3075,8 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3095,8 +3075,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3704,11 +3683,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -3716,11 +3694,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -3728,10 +3705,9 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go index 2afbfbe9..b52f0f5e 100644 --- a/vendor/github.com/letsencrypt/boulder/core/objects.go +++ b/vendor/github.com/letsencrypt/boulder/core/objects.go @@ -75,11 +75,16 @@ type OCSPStatus string const ( OCSPStatusGood = OCSPStatus("good") OCSPStatusRevoked = OCSPStatus("revoked") + // Not a real OCSP status. This is a placeholder we write before the + // actual precertificate is issued, to ensure we never return "good" before + // issuance succeeds, for BR compliance reasons. + OCSPStatusNotReady = OCSPStatus("wait") ) var OCSPStatusToInt = map[OCSPStatus]int{ - OCSPStatusGood: ocsp.Good, - OCSPStatusRevoked: ocsp.Revoked, + OCSPStatusGood: ocsp.Good, + OCSPStatusRevoked: ocsp.Revoked, + OCSPStatusNotReady: -1, } // DNSPrefix is attached to DNS names in DNS challenges @@ -120,7 +125,7 @@ type ValidationRecord struct { URL string `json:"url,omitempty"` // Shared - Hostname string `json:"hostname"` + Hostname string `json:"hostname,omitempty"` Port string `json:"port,omitempty"` AddressesResolved []net.IP `json:"addressesResolved,omitempty"` AddressUsed net.IP `json:"addressUsed,omitempty"` @@ -337,11 +342,18 @@ type Authorization struct { // slice and the order of these challenges may not be predictable. 
Challenges []Challenge `json:"challenges,omitempty" db:"-"` - // Wildcard is a Boulder-specific Authorization field that indicates the - // authorization was created as a result of an order containing a name with - // a `*.`wildcard prefix. This will help convey to users that an - // Authorization with the identifier `example.com` and one DNS-01 challenge - // corresponds to a name `*.example.com` from an associated order. + // https://datatracker.ietf.org/doc/html/rfc8555#page-29 + // + // wildcard (optional, boolean): This field MUST be present and true + // for authorizations created as a result of a newOrder request + // containing a DNS identifier with a value that was a wildcard + // domain name. For other authorizations, it MUST be absent. + // Wildcard domain names are described in Section 7.1.3. + // + // This is not represented in the database because we calculate it from + // the identifier stored in the database. Unlike the identifier returned + // as part of the authorization, the identifier we store in the database + // can contain an asterisk. Wildcard bool `json:"wildcard,omitempty" db:"-"` } @@ -406,53 +418,46 @@ type Certificate struct { } // CertificateStatus structs are internal to the server. They represent the -// latest data about the status of the certificate, required for OCSP updating -// and for validating that the subscriber has accepted the certificate. +// latest data about the status of the certificate, required for generating new +// OCSP responses and determining if a certificate has been revoked. type CertificateStatus struct { ID int64 `db:"id"` Serial string `db:"serial"` // status: 'good' or 'revoked'. Note that good, expired certificates remain - // with status 'good' but don't necessarily get fresh OCSP responses. + // with status 'good' but don't necessarily get fresh OCSP responses. Status OCSPStatus `db:"status"` // ocspLastUpdated: The date and time of the last time we generated an OCSP - // response. If we have never generated one, this has the zero value of - // time.Time, i.e. Jan 1 1970. + // response. If we have never generated one, this has the zero value of + // time.Time, i.e. Jan 1 1970. OCSPLastUpdated time.Time `db:"ocspLastUpdated"` // revokedDate: If status is 'revoked', this is the date and time it was - // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970. + // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970. RevokedDate time.Time `db:"revokedDate"` // revokedReason: If status is 'revoked', this is the reason code for the - // revocation. Otherwise it is zero (which happens to be the reason - // code for 'unspecified'). + // revocation. Otherwise it is zero (which happens to be the reason + // code for 'unspecified'). RevokedReason revocation.Reason `db:"revokedReason"` LastExpirationNagSent time.Time `db:"lastExpirationNagSent"` - // The encoded and signed OCSP response. - OCSPResponse []byte `db:"ocspResponse"` - - // For performance reasons[0] we duplicate the `Expires` field of the - // `Certificates` object/table in `CertificateStatus` to avoid a costly `JOIN` - // later on just to retrieve this `Time` value. This helps both the OCSP - // updater and the expiration-mailer stay performant. - // - // Similarly, we add an explicit `IsExpired` boolean to `CertificateStatus` - // table that the OCSP updater so that the database can create a meaningful - // index on `(isExpired, ocspLastUpdated)` without a `JOIN` on `certificates`. - // For more detail see Boulder #1864[0]. 
- // - // [0]: https://github.com/letsencrypt/boulder/issues/1864 + // NotAfter and IsExpired are convenience columns which allow expensive + // queries to quickly filter out certificates that we don't need to care about + // anymore. These are particularly useful for the expiration mailer and CRL + // updater. See https://github.com/letsencrypt/boulder/issues/1864. NotAfter time.Time `db:"notAfter"` IsExpired bool `db:"isExpired"` - // TODO(#5152): Change this to an issuance.Issuer(Name)ID after it no longer - // has to support both IssuerNameIDs and IssuerIDs. - IssuerID int64 + // Note: this is not an issuance.IssuerNameID because that would create an + // import cycle between core and issuance. + // Note2: This field used to be called `issuerID`. We keep the old name in + // the DB, but update the Go field name to be clear which type of ID this + // is. + IssuerNameID int64 `db:"issuerID"` } // FQDNSet contains the SHA256 hash of the lowercased, comma joined dNSNames @@ -501,7 +506,7 @@ func RenewalInfoSimple(issued time.Time, expires time.Time) RenewalInfo { } // RenewalInfoImmediate constructs a `RenewalInfo` object with a suggested -// window in the past. Per the draft-ietf-acme-ari-00 spec, clients should +// window in the past. Per the draft-ietf-acme-ari-01 spec, clients should // attempt to renew immediately if the suggested window is in the past. The // passed `now` is assumed to be a timestamp representing the current moment in // time. diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go index 6949e456..d7fe0266 100644 --- a/vendor/github.com/letsencrypt/boulder/core/util.go +++ b/vendor/github.com/letsencrypt/boulder/core/util.go @@ -1,9 +1,10 @@ package core import ( - "bytes" "crypto" + "crypto/ecdsa" "crypto/rand" + "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/base64" @@ -16,6 +17,7 @@ import ( "math/big" mrand "math/rand" "os" + "path" "reflect" "regexp" "sort" @@ -23,7 +25,7 @@ import ( "time" "unicode" - jose "gopkg.in/go-jose/go-jose.v2" + "gopkg.in/go-jose/go-jose.v2" ) const Unspecified = "Unspecified" @@ -96,7 +98,7 @@ func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) { switch t := key.(type) { case *jose.JSONWebKey: if t == nil { - return Sha256Digest{}, fmt.Errorf("Cannot compute digest of nil key") + return Sha256Digest{}, errors.New("cannot compute digest of nil key") } return KeyDigest(t.Key) case jose.JSONWebKey: @@ -132,21 +134,16 @@ func KeyDigestEquals(j, k crypto.PublicKey) bool { return digestJ == digestK } -// PublicKeysEqual determines whether two public keys have the same marshalled -// bytes as one another -func PublicKeysEqual(a, b interface{}) (bool, error) { - if a == nil || b == nil { - return false, errors.New("One or more nil arguments to PublicKeysEqual") - } - aBytes, err := x509.MarshalPKIXPublicKey(a) - if err != nil { - return false, err - } - bBytes, err := x509.MarshalPKIXPublicKey(b) - if err != nil { - return false, err +// PublicKeysEqual determines whether two public keys are identical. 
+func PublicKeysEqual(a, b crypto.PublicKey) (bool, error) { + switch ak := a.(type) { + case *rsa.PublicKey: + return ak.Equal(b), nil + case *ecdsa.PublicKey: + return ak.Equal(b), nil + default: + return false, fmt.Errorf("unsupported public key type %T", ak) } - return bytes.Equal(aBytes, bBytes), nil } // SerialToString converts a certificate serial number (big.Int) to a String @@ -160,7 +157,7 @@ func SerialToString(serial *big.Int) string { func StringToSerial(serial string) (*big.Int, error) { var serialNum big.Int if !ValidSerial(serial) { - return &serialNum, errors.New("Invalid serial number") + return &serialNum, fmt.Errorf("invalid serial number %q", serial) } _, err := fmt.Sscanf(serial, "%036x", &serialNum) return &serialNum, err @@ -245,6 +242,14 @@ func UniqueLowerNames(names []string) (unique []string) { return } +// HashNames returns a hash of the names requested. This is intended for use +// when interacting with the orderFqdnSets table and rate limiting. +func HashNames(names []string) []byte { + names = UniqueLowerNames(names) + hash := sha256.Sum256([]byte(strings.Join(names, ","))) + return hash[:] +} + // LoadCert loads a PEM certificate specified by filename or returns an error func LoadCert(filename string) (*x509.Certificate, error) { certPEM, err := os.ReadFile(filename) @@ -253,7 +258,7 @@ func LoadCert(filename string) (*x509.Certificate, error) { } block, _ := pem.Decode(certPEM) if block == nil { - return nil, fmt.Errorf("No data in cert PEM file %s", filename) + return nil, fmt.Errorf("no data in cert PEM file %q", filename) } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { @@ -298,3 +303,7 @@ func IsASCII(str string) bool { } return true } + +func Command() string { + return path.Base(os.Args[0]) +} diff --git a/vendor/github.com/letsencrypt/boulder/errors/errors.go b/vendor/github.com/letsencrypt/boulder/errors/errors.go deleted file mode 100644 index 50be1087..00000000 --- a/vendor/github.com/letsencrypt/boulder/errors/errors.go +++ /dev/null @@ -1,194 +0,0 @@ -// Package errors provides internal-facing error types for use in Boulder. Many -// of these are transformed directly into Problem Details documents by the WFE. -// Some, like NotFound, may be handled internally. We avoid using Problem -// Details documents as part of our internal error system to avoid layering -// confusions. -// -// These errors are specifically for use in errors that cross RPC boundaries. -// An error type that does not need to be passed through an RPC can use a plain -// Go type locally. Our gRPC code is aware of these error types and will -// serialize and deserialize them automatically. -package errors - -import ( - "fmt" - "time" - - "github.com/letsencrypt/boulder/identifier" -) - -// ErrorType provides a coarse category for BoulderErrors. -// Objects of type ErrorType should never be directly returned by other -// functions; instead use the methods below to create an appropriate -// BoulderError wrapping one of these types. -type ErrorType int - -// These numeric constants are used when sending berrors through gRPC. -const ( - // InternalServer is deprecated. Instead, pass a plain Go error. That will get - // turned into a probs.InternalServerError by the WFE. 
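
Stepping back to the core/util.go hunk above: the new HashNames helper lowercases, deduplicates, and sorts the names (via UniqueLowerNames) before joining them with "," and hashing, so the digest is insensitive to order, case, and repeats. A standalone sketch of that property (a reimplementation for illustration, not an import of Boulder's package):

    package main

    import (
        "bytes"
        "crypto/sha256"
        "fmt"
        "sort"
        "strings"
    )

    // hashNames mirrors core.HashNames as added above: unique lowercase
    // names, sorted, joined on ",", then SHA-256.
    func hashNames(names []string) []byte {
        seen := map[string]struct{}{}
        for _, n := range names {
            seen[strings.ToLower(n)] = struct{}{}
        }
        unique := make([]string, 0, len(seen))
        for n := range seen {
            unique = append(unique, n)
        }
        sort.Strings(unique)
        h := sha256.Sum256([]byte(strings.Join(unique, ",")))
        return h[:]
    }

    func main() {
        a := hashNames([]string{"Example.com", "www.example.com"})
        b := hashNames([]string{"www.example.com", "example.com", "EXAMPLE.COM"})
        fmt.Println(bytes.Equal(a, b)) // true: order, case, and repeats are ignored
    }
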
- InternalServer ErrorType = iota - _ - Malformed - Unauthorized - NotFound - RateLimit - RejectedIdentifier - InvalidEmail - ConnectionFailure - _ // Reserved, previously WrongAuthorizationState - CAA - MissingSCTs - Duplicate - OrderNotReady - DNS - BadPublicKey - BadCSR - AlreadyRevoked - BadRevocationReason -) - -func (ErrorType) Error() string { - return "urn:ietf:params:acme:error" -} - -// BoulderError represents internal Boulder errors -type BoulderError struct { - Type ErrorType - Detail string - SubErrors []SubBoulderError - - // RetryAfter the duration a client should wait before retrying the request - // which resulted in this error. - RetryAfter time.Duration -} - -// SubBoulderError represents sub-errors specific to an identifier that are -// related to a top-level internal Boulder error. -type SubBoulderError struct { - *BoulderError - Identifier identifier.ACMEIdentifier -} - -func (be *BoulderError) Error() string { - return be.Detail -} - -func (be *BoulderError) Unwrap() error { - return be.Type -} - -// WithSubErrors returns a new BoulderError instance created by adding the -// provided subErrs to the existing BoulderError. -func (be *BoulderError) WithSubErrors(subErrs []SubBoulderError) *BoulderError { - return &BoulderError{ - Type: be.Type, - Detail: be.Detail, - SubErrors: append(be.SubErrors, subErrs...), - RetryAfter: be.RetryAfter, - } -} - -// New is a convenience function for creating a new BoulderError -func New(errType ErrorType, msg string, args ...interface{}) error { - return &BoulderError{ - Type: errType, - Detail: fmt.Sprintf(msg, args...), - } -} - -func InternalServerError(msg string, args ...interface{}) error { - return New(InternalServer, msg, args...) -} - -func MalformedError(msg string, args ...interface{}) error { - return New(Malformed, msg, args...) -} - -func UnauthorizedError(msg string, args ...interface{}) error { - return New(Unauthorized, msg, args...) -} - -func NotFoundError(msg string, args ...interface{}) error { - return New(NotFound, msg, args...) -} - -func RateLimitError(retryAfter time.Duration, msg string, args ...interface{}) error { - return &BoulderError{ - Type: RateLimit, - Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...), - RetryAfter: retryAfter, - } -} - -func DuplicateCertificateError(retryAfter time.Duration, msg string, args ...interface{}) error { - return &BoulderError{ - Type: RateLimit, - Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/duplicate-certificate-limit/", args...), - RetryAfter: retryAfter, - } -} - -func FailedValidationError(retryAfter time.Duration, msg string, args ...interface{}) error { - return &BoulderError{ - Type: RateLimit, - Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/failed-validation-limit/", args...), - RetryAfter: retryAfter, - } -} - -func RegistrationsPerIPError(retryAfter time.Duration, msg string, args ...interface{}) error { - return &BoulderError{ - Type: RateLimit, - Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/too-many-registrations-for-this-ip/", args...), - RetryAfter: retryAfter, - } -} - -func RejectedIdentifierError(msg string, args ...interface{}) error { - return New(RejectedIdentifier, msg, args...) -} - -func InvalidEmailError(msg string, args ...interface{}) error { - return New(InvalidEmail, msg, args...) -} - -func ConnectionFailureError(msg string, args ...interface{}) error { - return New(ConnectionFailure, msg, args...) 
-} - -func CAAError(msg string, args ...interface{}) error { - return New(CAA, msg, args...) -} - -func MissingSCTsError(msg string, args ...interface{}) error { - return New(MissingSCTs, msg, args...) -} - -func DuplicateError(msg string, args ...interface{}) error { - return New(Duplicate, msg, args...) -} - -func OrderNotReadyError(msg string, args ...interface{}) error { - return New(OrderNotReady, msg, args...) -} - -func DNSError(msg string, args ...interface{}) error { - return New(DNS, msg, args...) -} - -func BadPublicKeyError(msg string, args ...interface{}) error { - return New(BadPublicKey, msg, args...) -} - -func BadCSRError(msg string, args ...interface{}) error { - return New(BadCSR, msg, args...) -} - -func AlreadyRevokedError(msg string, args ...interface{}) error { - return New(AlreadyRevoked, msg, args...) -} - -func BadRevocationReasonError(reason int64) error { - return New(BadRevocationReason, "disallowed revocation reason: %d", reason) -} diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go index fdcfe9a1..198c09db 100644 --- a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go +++ b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go @@ -9,8 +9,7 @@ import ( "os" "github.com/letsencrypt/boulder/core" - - yaml "gopkg.in/yaml.v3" + "github.com/letsencrypt/boulder/strictyaml" ) // blockedKeys is a type for maintaining a map of SHA256 hashes @@ -58,7 +57,7 @@ func loadBlockedKeysList(filename string) (*blockedKeys, error) { BlockedHashes []string `yaml:"blocked"` BlockedHashesHex []string `yaml:"blockedHashesHex"` } - err = yaml.Unmarshal(yamlBytes, &list) + err = strictyaml.Unmarshal(yamlBytes, &list) if err != nil { return nil, err } diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go index 741bbf26..087a0181 100644 --- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go +++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go @@ -12,7 +12,6 @@ import ( "sync" "github.com/letsencrypt/boulder/core" - berrors "github.com/letsencrypt/boulder/errors" "github.com/titanous/rocacheck" ) @@ -136,7 +135,7 @@ func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) erro // that has been administratively blocked. if policy.blockedList != nil { if blocked, err := policy.blockedList.blocked(key); err != nil { - return berrors.InternalServerError("error checking blocklist for key: %v", key) + return fmt.Errorf("error checking blocklist for key: %v", key) } else if blocked { return badKey("public key is forbidden") } diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go index 3736e8d3..2cc76623 100644 --- a/vendor/github.com/letsencrypt/boulder/probs/probs.go +++ b/vendor/github.com/letsencrypt/boulder/probs/probs.go @@ -7,29 +7,33 @@ import ( "github.com/letsencrypt/boulder/identifier" ) -// Error types that can be used in ACME payloads const ( + // Error types that can be used in ACME payloads. These are sorted in the + // same order as they are defined in RFC8555 Section 6.7. We do not implement + // the `compound`, `externalAccountRequired`, or `userActionRequired` errors, + // because we have no path that would return them. 
+ AccountDoesNotExistProblem = ProblemType("accountDoesNotExist") + AlreadyRevokedProblem = ProblemType("alreadyRevoked") + BadCSRProblem = ProblemType("badCSR") + BadNonceProblem = ProblemType("badNonce") + BadPublicKeyProblem = ProblemType("badPublicKey") + BadRevocationReasonProblem = ProblemType("badRevocationReason") + BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm") + CAAProblem = ProblemType("caa") ConnectionProblem = ProblemType("connection") + DNSProblem = ProblemType("dns") + InvalidContactProblem = ProblemType("invalidContact") MalformedProblem = ProblemType("malformed") + OrderNotReadyProblem = ProblemType("orderNotReady") + RateLimitedProblem = ProblemType("rateLimited") + RejectedIdentifierProblem = ProblemType("rejectedIdentifier") ServerInternalProblem = ProblemType("serverInternal") TLSProblem = ProblemType("tls") UnauthorizedProblem = ProblemType("unauthorized") - RateLimitedProblem = ProblemType("rateLimited") - BadNonceProblem = ProblemType("badNonce") - InvalidEmailProblem = ProblemType("invalidEmail") - RejectedIdentifierProblem = ProblemType("rejectedIdentifier") - AccountDoesNotExistProblem = ProblemType("accountDoesNotExist") - CAAProblem = ProblemType("caa") - DNSProblem = ProblemType("dns") - AlreadyRevokedProblem = ProblemType("alreadyRevoked") - OrderNotReadyProblem = ProblemType("orderNotReady") - BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm") - BadPublicKeyProblem = ProblemType("badPublicKey") - BadRevocationReasonProblem = ProblemType("badRevocationReason") - BadCSRProblem = ProblemType("badCSR") + UnsupportedContactProblem = ProblemType("unsupportedContact") + UnsupportedIdentifierProblem = ProblemType("unsupportedIdentifier") - V1ErrorNS = "urn:acme:error:" - V2ErrorNS = "urn:ietf:params:acme:error:" + ErrorNS = "urn:ietf:params:acme:error:" ) // ProblemType defines the error types in the ACME protocol @@ -71,112 +75,71 @@ func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *Problem } } -// statusTooManyRequests is the HTTP status code meant for rate limiting -// errors. It's not currently in the net/http library so we add it here. -const statusTooManyRequests = 429 +// Helper functions which construct the basic RFC8555 Problem Documents, with +// the Type already set and the Details supplied by the caller. -// ProblemDetailsToStatusCode inspects the given ProblemDetails to figure out -// what HTTP status code it should represent. It should only be used by the WFE -// but is included in this package because of its reliance on ProblemTypes. -func ProblemDetailsToStatusCode(prob *ProblemDetails) int { - if prob.HTTPStatus != 0 { - return prob.HTTPStatus - } - switch prob.Type { - case - ConnectionProblem, - MalformedProblem, - BadSignatureAlgorithmProblem, - BadPublicKeyProblem, - TLSProblem, - BadNonceProblem, - InvalidEmailProblem, - RejectedIdentifierProblem, - AccountDoesNotExistProblem, - BadRevocationReasonProblem: - return http.StatusBadRequest - case ServerInternalProblem: - return http.StatusInternalServerError - case - UnauthorizedProblem, - CAAProblem: - return http.StatusForbidden - case RateLimitedProblem: - return statusTooManyRequests - default: - return http.StatusInternalServerError - } -} - -// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad -// Request status code. 
-func BadNonce(detail string) *ProblemDetails { +// AccountDoesNotExist returns a ProblemDetails representing an +// AccountDoesNotExistProblem error +func AccountDoesNotExist(detail string) *ProblemDetails { return &ProblemDetails{ - Type: BadNonceProblem, + Type: AccountDoesNotExistProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad +// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad // Request status code. -func RejectedIdentifier(detail string) *ProblemDetails { +func AlreadyRevoked(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: RejectedIdentifierProblem, - Detail: detail, + Type: AlreadyRevokedProblem, + Detail: fmt.Sprintf(detail, a...), HTTPStatus: http.StatusBadRequest, } } -// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict -// status code. -func Conflict(detail string) *ProblemDetails { +// BadCSR returns a ProblemDetails representing a BadCSRProblem. +func BadCSR(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, - HTTPStatus: http.StatusConflict, + Type: BadCSRProblem, + Detail: fmt.Sprintf(detail, a...), + HTTPStatus: http.StatusBadRequest, } } -// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad +// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad // Request status code. -func AlreadyRevoked(detail string, a ...interface{}) *ProblemDetails { +func BadNonce(detail string) *ProblemDetails { return &ProblemDetails{ - Type: AlreadyRevokedProblem, - Detail: fmt.Sprintf(detail, a...), + Type: BadNonceProblem, + Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad +// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad // Request status code. -func Malformed(detail string, args ...interface{}) *ProblemDetails { - if len(args) > 0 { - detail = fmt.Sprintf(detail, args...) - } +func BadPublicKey(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, + Type: BadPublicKeyProblem, + Detail: fmt.Sprintf(detail, a...), HTTPStatus: http.StatusBadRequest, } } -// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request -// Timeout status code. -func Canceled(detail string, args ...interface{}) *ProblemDetails { - if len(args) > 0 { - detail = fmt.Sprintf(detail, args...) - } +// BadRevocationReason returns a ProblemDetails representing +// a BadRevocationReasonProblem +func BadRevocationReason(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, - Detail: detail, - HTTPStatus: http.StatusRequestTimeout, + Type: BadRevocationReasonProblem, + Detail: fmt.Sprintf(detail, a...), + HTTPStatus: http.StatusBadRequest, } } // BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem // and a 400 Bad Request status code. 
-func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails { +func BadSignatureAlgorithm(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ Type: BadSignatureAlgorithmProblem, Detail: fmt.Sprintf(detail, a...), @@ -184,166 +147,195 @@ func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails { } } -// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad -// Request status code. -func BadPublicKey(detail string, a ...interface{}) *ProblemDetails { +// CAA returns a ProblemDetails representing a CAAProblem +func CAA(detail string) *ProblemDetails { return &ProblemDetails{ - Type: BadPublicKeyProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, + Type: CAAProblem, + Detail: detail, + HTTPStatus: http.StatusForbidden, } } -// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found -// status code. -func NotFound(detail string) *ProblemDetails { +// Connection returns a ProblemDetails representing a ConnectionProblem +// error +func Connection(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: ConnectionProblem, Detail: detail, - HTTPStatus: http.StatusNotFound, + HTTPStatus: http.StatusBadRequest, } } -// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a -// 500 Internal Server Failure status code. -func ServerInternal(detail string) *ProblemDetails { +// DNS returns a ProblemDetails representing a DNSProblem +func DNS(detail string) *ProblemDetails { return &ProblemDetails{ - Type: ServerInternalProblem, + Type: DNSProblem, Detail: detail, - HTTPStatus: http.StatusInternalServerError, + HTTPStatus: http.StatusBadRequest, } } -// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403 -// Forbidden status code. -func Unauthorized(detail string) *ProblemDetails { +// InvalidContact returns a ProblemDetails representing an InvalidContactProblem. +func InvalidContact(detail string) *ProblemDetails { return &ProblemDetails{ - Type: UnauthorizedProblem, + Type: InvalidContactProblem, Detail: detail, - HTTPStatus: http.StatusForbidden, + HTTPStatus: http.StatusBadRequest, } } -// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP -// method error. -func MethodNotAllowed() *ProblemDetails { +// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad +// Request status code. +func Malformed(detail string, a ...any) *ProblemDetails { + if len(a) > 0 { + detail = fmt.Sprintf(detail, a...) 
+ } return &ProblemDetails{ Type: MalformedProblem, - Detail: "Method not allowed", - HTTPStatus: http.StatusMethodNotAllowed, + Detail: detail, + HTTPStatus: http.StatusBadRequest, } } -// ContentLengthRequired returns a ProblemDetails representing a missing -// Content-Length header error -func ContentLengthRequired() *ProblemDetails { +// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem +func OrderNotReady(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, - Detail: "missing Content-Length header", - HTTPStatus: http.StatusLengthRequired, + Type: OrderNotReadyProblem, + Detail: fmt.Sprintf(detail, a...), + HTTPStatus: http.StatusForbidden, } } -// InvalidContentType returns a ProblemDetails suitable for a missing -// ContentType header, or an incorrect ContentType header -func InvalidContentType(detail string) *ProblemDetails { +// RateLimited returns a ProblemDetails representing a RateLimitedProblem error +func RateLimited(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: RateLimitedProblem, Detail: detail, - HTTPStatus: http.StatusUnsupportedMediaType, + HTTPStatus: http.StatusTooManyRequests, } } -// InvalidEmail returns a ProblemDetails representing an invalid email address -// error -func InvalidEmail(detail string) *ProblemDetails { +// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad +// Request status code. +func RejectedIdentifier(detail string) *ProblemDetails { return &ProblemDetails{ - Type: InvalidEmailProblem, + Type: RejectedIdentifierProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// ConnectionFailure returns a ProblemDetails representing a ConnectionProblem -// error -func ConnectionFailure(detail string) *ProblemDetails { +// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a +// 500 Internal Server Failure status code. +func ServerInternal(detail string) *ProblemDetails { return &ProblemDetails{ - Type: ConnectionProblem, + Type: ServerInternalProblem, + Detail: detail, + HTTPStatus: http.StatusInternalServerError, + } +} + +// TLS returns a ProblemDetails representing a TLSProblem error +func TLS(detail string) *ProblemDetails { + return &ProblemDetails{ + Type: TLSProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// RateLimited returns a ProblemDetails representing a RateLimitedProblem error -func RateLimited(detail string) *ProblemDetails { +// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403 +// Forbidden status code. 
+func Unauthorized(detail string) *ProblemDetails { return &ProblemDetails{ - Type: RateLimitedProblem, + Type: UnauthorizedProblem, Detail: detail, - HTTPStatus: statusTooManyRequests, + HTTPStatus: http.StatusForbidden, } } -// TLSError returns a ProblemDetails representing a TLSProblem error -func TLSError(detail string) *ProblemDetails { +// UnsupportedContact returns a ProblemDetails representing an +// UnsupportedContactProblem +func UnsupportedContact(detail string) *ProblemDetails { return &ProblemDetails{ - Type: TLSProblem, + Type: UnsupportedContactProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// AccountDoesNotExist returns a ProblemDetails representing an -// AccountDoesNotExistProblem error -func AccountDoesNotExist(detail string) *ProblemDetails { +// UnsupportedIdentifier returns a ProblemDetails representing an +// UnsupportedIdentifierProblem +func UnsupportedIdentifier(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: AccountDoesNotExistProblem, - Detail: detail, + Type: UnsupportedIdentifierProblem, + Detail: fmt.Sprintf(detail, a...), HTTPStatus: http.StatusBadRequest, } } -// CAA returns a ProblemDetails representing a CAAProblem -func CAA(detail string) *ProblemDetails { +// Additional helper functions that return variations on MalformedProblem with +// different HTTP status codes set. + +// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request +// Timeout status code. +func Canceled(detail string, a ...any) *ProblemDetails { + if len(a) > 0 { + detail = fmt.Sprintf(detail, a...) + } return &ProblemDetails{ - Type: CAAProblem, + Type: MalformedProblem, Detail: detail, - HTTPStatus: http.StatusForbidden, + HTTPStatus: http.StatusRequestTimeout, } } -// DNS returns a ProblemDetails representing a DNSProblem -func DNS(detail string) *ProblemDetails { +// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict +// status code. +func Conflict(detail string) *ProblemDetails { return &ProblemDetails{ - Type: DNSProblem, + Type: MalformedProblem, Detail: detail, - HTTPStatus: http.StatusBadRequest, + HTTPStatus: http.StatusConflict, } } -// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem -func OrderNotReady(detail string, a ...interface{}) *ProblemDetails { +// ContentLengthRequired returns a ProblemDetails representing a missing +// Content-Length header error +func ContentLengthRequired() *ProblemDetails { return &ProblemDetails{ - Type: OrderNotReadyProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusForbidden, + Type: MalformedProblem, + Detail: "missing Content-Length header", + HTTPStatus: http.StatusLengthRequired, } } -// BadRevocationReason returns a ProblemDetails representing -// a BadRevocationReasonProblem -func BadRevocationReason(detail string, a ...interface{}) *ProblemDetails { +// InvalidContentType returns a ProblemDetails suitable for a missing +// ContentType header, or an incorrect ContentType header +func InvalidContentType(detail string) *ProblemDetails { return &ProblemDetails{ - Type: BadRevocationReasonProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, + Type: MalformedProblem, + Detail: detail, + HTTPStatus: http.StatusUnsupportedMediaType, } } -// BadCSR returns a ProblemDetails representing a BadCSRProblem. -func BadCSR(detail string, a ...interface{}) *ProblemDetails { +// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP +// method error. 
+func MethodNotAllowed() *ProblemDetails { return &ProblemDetails{ - Type: BadCSRProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, + Type: MalformedProblem, + Detail: "Method not allowed", + HTTPStatus: http.StatusMethodNotAllowed, + } +} + +// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found +// status code. +func NotFound(detail string) *ProblemDetails { + return &ProblemDetails{ + Type: MalformedProblem, + Detail: detail, + HTTPStatus: http.StatusNotFound, } } diff --git a/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go b/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go new file mode 100644 index 00000000..8e3bae99 --- /dev/null +++ b/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go @@ -0,0 +1,46 @@ +// Package strictyaml provides a strict YAML unmarshaller based on `go-yaml/yaml` +package strictyaml + +import ( + "bytes" + "errors" + "fmt" + "io" + + "gopkg.in/yaml.v3" +) + +// Unmarshal takes a byte array and an interface passed by reference. The +// d.Decode will read the next YAML-encoded value from its input and store it in +// the value pointed to by yamlObj. Any config keys from the incoming YAML +// document which do not correspond to expected keys in the config struct will +// result in errors. +// +// TODO(https://github.com/go-yaml/yaml/issues/639): Replace this function with +// yaml.Unmarshal once a more ergonomic way to set unmarshal options is added +// upstream. +func Unmarshal(b []byte, yamlObj interface{}) error { + r := bytes.NewReader(b) + + d := yaml.NewDecoder(r) + d.KnownFields(true) + + // d.Decode will mutate yamlObj + err := d.Decode(yamlObj) + + if err != nil { + // io.EOF is returned when the YAML document is empty. + if errors.Is(err, io.EOF) { + return fmt.Errorf("unmarshalling YAML, bytes cannot be nil: %w", err) + } + return fmt.Errorf("unmarshalling YAML: %w", err) + } + + // As bytes are read by the decoder, the length of the byte buffer should + // decrease. If it doesn't, there's a problem. + if r.Len() != 0 { + return fmt.Errorf("yaml object of size %d bytes had %d bytes of unexpected unconsumed trailers", r.Size(), r.Len()) + } + + return nil +} diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go index d3056910..b794bcd8 100644 --- a/vendor/github.com/mattn/go-sqlite3/callback.go +++ b/vendor/github.com/mattn/go-sqlite3/callback.go @@ -100,13 +100,13 @@ func preUpdateHookTrampoline(handle unsafe.Pointer, dbHandle uintptr, op int, db // Use handles to avoid passing Go pointers to C. 
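
Circling back to the strictyaml package vendored above: its Unmarshal differs from a plain yaml.Unmarshal in two ways visible in the code above: it enables KnownFields(true), so unknown keys are an error, and it rejects unconsumed trailing bytes. A usage sketch against the vendored API (the struct and key names here are hypothetical):

    package main

    import (
        "fmt"

        "github.com/letsencrypt/boulder/strictyaml"
    )

    type blockedList struct {
        Blocked []string `yaml:"blocked"`
    }

    func main() {
        var l blockedList

        // A well-formed document decodes normally.
        err := strictyaml.Unmarshal([]byte("blocked: [deadbeef]\n"), &l)
        fmt.Println(err, l.Blocked) // <nil> [deadbeef]

        // A misspelled key fails under KnownFields(true), where the
        // permissive yaml.Unmarshal would silently drop it.
        err = strictyaml.Unmarshal([]byte("blockedTypo: [deadbeef]\n"), &l)
        fmt.Println(err != nil) // true
    }
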
type handleVal struct { db *SQLiteConn - val interface{} + val any } var handleLock sync.Mutex var handleVals = make(map[unsafe.Pointer]handleVal) -func newHandle(db *SQLiteConn, v interface{}) unsafe.Pointer { +func newHandle(db *SQLiteConn, v any) unsafe.Pointer { handleLock.Lock() defer handleLock.Unlock() val := handleVal{db: db, val: v} @@ -124,7 +124,7 @@ func lookupHandleVal(handle unsafe.Pointer) handleVal { return handleVals[handle] } -func lookupHandle(handle unsafe.Pointer) interface{} { +func lookupHandle(handle unsafe.Pointer) any { return lookupHandleVal(handle).val } @@ -238,7 +238,7 @@ func callbackArg(typ reflect.Type) (callbackArgConverter, error) { switch typ.Kind() { case reflect.Interface: if typ.NumMethod() != 0 { - return nil, errors.New("the only supported interface type is interface{}") + return nil, errors.New("the only supported interface type is any") } return callbackArgGeneric, nil case reflect.Slice: @@ -360,11 +360,11 @@ func callbackRetGeneric(ctx *C.sqlite3_context, v reflect.Value) error { } cb, err := callbackRet(v.Elem().Type()) - if err != nil { - return err - } + if err != nil { + return err + } - return cb(ctx, v.Elem()) + return cb(ctx, v.Elem()) } func callbackRet(typ reflect.Type) (callbackRetConverter, error) { diff --git a/vendor/github.com/mattn/go-sqlite3/convert.go b/vendor/github.com/mattn/go-sqlite3/convert.go index 03850737..f7a9dcd7 100644 --- a/vendor/github.com/mattn/go-sqlite3/convert.go +++ b/vendor/github.com/mattn/go-sqlite3/convert.go @@ -23,7 +23,7 @@ var errNilPtr = errors.New("destination pointer is nil") // embedded in descript // convertAssign copies to dest the value in src, converting it if possible. // An error is returned if the copy would result in loss of information. // dest should be a pointer type. -func convertAssign(dest, src interface{}) error { +func convertAssign(dest, src any) error { // Common cases, without reflect. switch s := src.(type) { case string: @@ -55,7 +55,7 @@ func convertAssign(dest, src interface{}) error { } *d = string(s) return nil - case *interface{}: + case *any: if d == nil { return errNilPtr } @@ -97,7 +97,7 @@ func convertAssign(dest, src interface{}) error { } case nil: switch d := dest.(type) { - case *interface{}: + case *any: if d == nil { return errNilPtr } @@ -149,7 +149,7 @@ func convertAssign(dest, src interface{}) error { *d = bv.(bool) } return err - case *interface{}: + case *any: *d = src return nil } @@ -256,7 +256,7 @@ func cloneBytes(b []byte) []byte { return c } -func asString(src interface{}) string { +func asString(src any) string { switch v := src.(type) { case string: return v diff --git a/vendor/github.com/mattn/go-sqlite3/doc.go b/vendor/github.com/mattn/go-sqlite3/doc.go index ac27633b..a3bcebbc 100644 --- a/vendor/github.com/mattn/go-sqlite3/doc.go +++ b/vendor/github.com/mattn/go-sqlite3/doc.go @@ -5,63 +5,63 @@ This works as a driver for database/sql. Installation - go get github.com/mattn/go-sqlite3 + go get github.com/mattn/go-sqlite3 -Supported Types +# Supported Types Currently, go-sqlite3 supports the following data types. 
- +------------------------------+ - |go | sqlite3 | - |----------|-------------------| - |nil | null | - |int | integer | - |int64 | integer | - |float64 | float | - |bool | integer | - |[]byte | blob | - |string | text | - |time.Time | timestamp/datetime| - +------------------------------+ - -SQLite3 Extension + +------------------------------+ + |go | sqlite3 | + |----------|-------------------| + |nil | null | + |int | integer | + |int64 | integer | + |float64 | float | + |bool | integer | + |[]byte | blob | + |string | text | + |time.Time | timestamp/datetime| + +------------------------------+ + +# SQLite3 Extension You can write your own extension module for sqlite3. For example, below is an extension for a Regexp matcher operation. - #include - #include - #include - #include - - SQLITE_EXTENSION_INIT1 - static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) { - if (argc >= 2) { - const char *target = (const char *)sqlite3_value_text(argv[1]); - const char *pattern = (const char *)sqlite3_value_text(argv[0]); - const char* errstr = NULL; - int erroff = 0; - int vec[500]; - int n, rc; - pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL); - rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500); - if (rc <= 0) { - sqlite3_result_error(context, errstr, 0); - return; - } - sqlite3_result_int(context, 1); - } - } - - #ifdef _WIN32 - __declspec(dllexport) - #endif - int sqlite3_extension_init(sqlite3 *db, char **errmsg, - const sqlite3_api_routines *api) { - SQLITE_EXTENSION_INIT2(api); - return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, - (void*)db, regexp_func, NULL, NULL); - } + #include + #include + #include + #include + + SQLITE_EXTENSION_INIT1 + static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) { + if (argc >= 2) { + const char *target = (const char *)sqlite3_value_text(argv[1]); + const char *pattern = (const char *)sqlite3_value_text(argv[0]); + const char* errstr = NULL; + int erroff = 0; + int vec[500]; + int n, rc; + pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL); + rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500); + if (rc <= 0) { + sqlite3_result_error(context, errstr, 0); + return; + } + sqlite3_result_int(context, 1); + } + } + + #ifdef _WIN32 + __declspec(dllexport) + #endif + int sqlite3_extension_init(sqlite3 *db, char **errmsg, + const sqlite3_api_routines *api) { + SQLITE_EXTENSION_INIT2(api); + return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, + (void*)db, regexp_func, NULL, NULL); + } It needs to be built as a so/dll shared library. And you need to register the extension module like below. @@ -77,7 +77,7 @@ Then, you can use this extension. rows, err := db.Query("select text from mytable where name regexp '^golang'") -Connection Hook +# Connection Hook You can hook and inject your code when the connection is established by setting ConnectHook to get the SQLiteConn. @@ -95,13 +95,13 @@ You can also use database/sql.Conn.Raw (Go >= 1.13): conn, err := db.Conn(context.Background()) // if err != nil { ... } defer conn.Close() - err = conn.Raw(func (driverConn interface{}) error { + err = conn.Raw(func (driverConn any) error { sqliteConn := driverConn.(*sqlite3.SQLiteConn) // ... use sqliteConn }) // if err != nil { ... 
} -Go SQlite3 Extensions +# Go SQlite3 Extensions If you want to register Go functions as SQLite extension functions you can make a custom driver by calling RegisterFunction from @@ -130,6 +130,5 @@ You can then use the custom driver by passing its name to sql.Open. } See the documentation of RegisterFunc for more details. - */ package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index 454512ad..53d7560e 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,7 +1,7 @@ #ifndef USE_LIBSQLITE3 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.44.0. By combining all the individual C code files into this +** version 3.45.1. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -19,7 +19,7 @@ ** separate file. This file contains only code for the core SQLite library. ** ** The content in this amalgamation comes from Fossil check-in -** 17129ba1ff7f0daf37100ee82d507aef7827. +** e876e51a0ed5c5b3126f52e532044363a014. */ #define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1 @@ -460,9 +460,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.44.0" -#define SQLITE_VERSION_NUMBER 3044000 -#define SQLITE_SOURCE_ID "2023-11-01 11:23:50 17129ba1ff7f0daf37100ee82d507aef7827cf38de1866e2633096ae6ad81301" +#define SQLITE_VERSION "3.45.1" +#define SQLITE_VERSION_NUMBER 3045001 +#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -4268,15 +4268,17 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename); ** ** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language -** text that describes the error, as either UTF-8 or UTF-16 respectively. +** text that describes the error, as either UTF-8 or UTF-16 respectively, +** or NULL if no error message is available. ** (See how SQLite handles [invalid UTF] for exceptions to this rule.) ** ^(Memory to hold the error message string is managed internally. ** The application does not need to worry about freeing the result. ** However, the error string might be overwritten or deallocated by ** subsequent calls to other SQLite interface functions.)^ ** -** ^The sqlite3_errstr() interface returns the English-language text -** that describes the [result code], as UTF-8. +** ^The sqlite3_errstr(E) interface returns the English-language text +** that describes the [result code] E, as UTF-8, or NULL if E is not an +** result code for which a text error message is available. ** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. ** @@ -5887,13 +5889,27 @@ SQLITE_API int sqlite3_create_window_function( ** ** ** [[SQLITE_SUBTYPE]]
<dt>SQLITE_SUBTYPE</dt><dd>
-** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call +** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call ** [sqlite3_value_subtype()] to inspect the sub-types of its arguments. -** Specifying this flag makes no difference for scalar or aggregate user -** functions. However, if it is not specified for a user-defined window -** function, then any sub-types belonging to arguments passed to the window -** function may be discarded before the window function is called (i.e. -** sqlite3_value_subtype() will always return 0). +** This flag instructs SQLite to omit some corner-case optimizations that +** might disrupt the operation of the [sqlite3_value_subtype()] function, +** causing it to return zero rather than the correct subtype(). +** SQL functions that invokes [sqlite3_value_subtype()] should have this +** property. If the SQLITE_SUBTYPE property is omitted, then the return +** value from [sqlite3_value_subtype()] might sometimes be zero even though +** a non-zero subtype was specified by the function argument expression. +** +** [[SQLITE_RESULT_SUBTYPE]]
<dt>SQLITE_RESULT_SUBTYPE</dt><dd>
+** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call +** [sqlite3_result_subtype()] to cause a sub-type to be associated with its +** result. +** Every function that invokes [sqlite3_result_subtype()] should have this +** property. If it does not, then the call to [sqlite3_result_subtype()] +** might become a no-op if the function is used as term in an +** [expression index]. On the other hand, SQL functions that never invoke +** [sqlite3_result_subtype()] should avoid setting this property, as the +** purpose of this property is to disable certain optimizations that are +** incompatible with subtypes. **
** */ @@ -5901,6 +5917,7 @@ SQLITE_API int sqlite3_create_window_function( #define SQLITE_DIRECTONLY 0x000080000 #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 +#define SQLITE_RESULT_SUBTYPE 0x001000000 /* ** CAPI3REF: Deprecated Functions @@ -6097,6 +6114,12 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); ** information can be used to pass a limited amount of context from ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. +** +** Every [application-defined SQL function] that invoke this interface +** should include the [SQLITE_SUBTYPE] property in the text +** encoding argument when the function is [sqlite3_create_function|registered]. +** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() +** might return zero instead of the upstream subtype in some corner cases. */ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); @@ -6227,14 +6250,22 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); **
<li> ^(when sqlite3_set_auxdata() is invoked again on the same ** parameter)^, or **
<li> ^(during the original sqlite3_set_auxdata() call when a memory -** allocation error occurs.)^ +** allocation error occurs.)^ +**
  • ^(during the original sqlite3_set_auxdata() call if the function +** is evaluated during query planning instead of during query execution, +** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ ** -** Note the last bullet in particular. The destructor X in +** Note the last two bullets in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the ** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() ** should be called near the end of the function implementation and the ** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. +** sqlite3_set_auxdata() has been called. Furthermore, a call to +** sqlite3_get_auxdata() that occurs immediately after a corresponding call +** to sqlite3_set_auxdata() might still return NULL if an out-of-memory +** condition occurred during the sqlite3_set_auxdata() call or if the +** function is being evaluated during query planning rather than during +** query execution. ** ** ^(In practice, auxiliary data is preserved between function calls for ** function parameters that are compile-time constants, including literal @@ -6508,6 +6539,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n); ** higher order bits are discarded. ** The number of subtype bytes preserved by SQLite might increase ** in future releases of SQLite. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_RESULT_SUBTYPE] property in its +** text encoding argument when the SQL function is +** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE] +** property is omitted from the function that invokes sqlite3_result_subtype(), +** then in some cases the sqlite3_result_subtype() might fail to set +** the result subtype. +** +** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any +** SQL function that invokes the sqlite3_result_subtype() interface +** and that does not have the SQLITE_RESULT_SUBTYPE property will raise +** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1 +** by default. */ SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int); @@ -8308,9 +8353,11 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** ** ^(Some systems (for example, Windows 95) do not support the operation ** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() -** will always return SQLITE_BUSY. The SQLite core only ever uses -** sqlite3_mutex_try() as an optimization so this is acceptable -** behavior.)^ +** will always return SQLITE_BUSY. In most cases the SQLite core only uses +** sqlite3_mutex_try() as an optimization, so this is acceptable +** behavior. The exceptions are unix builds that set the +** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working +** sqlite3_mutex_try() is required.)^ ** ** ^The sqlite3_mutex_leave() routine exits a mutex that was ** previously entered by the same thread. The behavior @@ -8569,6 +8616,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_ASSERT 12 #define SQLITE_TESTCTRL_ALWAYS 13 #define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */ +#define SQLITE_TESTCTRL_JSON_SELFCHECK 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ @@ -13082,8 +13130,11 @@ struct Fts5PhraseIter { ** created with the "columnsize=0" option. 
** ** xColumnText: -** This function attempts to retrieve the text of column iCol of the -** current document. If successful, (*pz) is set to point to a buffer +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer ** containing the text in utf-8 encoding, (*pn) is set to the size in bytes ** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, ** if an error occurs, an SQLite error code is returned and the final values @@ -13093,8 +13144,10 @@ struct Fts5PhraseIter { ** Returns the number of phrases in the current query expression. ** ** xPhraseSize: -** Returns the number of tokens in phrase iPhrase of the query. Phrases -** are numbered starting from zero. +** If parameter iCol is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. ** ** xInstCount: ** Set *pnInst to the total number of occurrences of all phrases within @@ -13110,12 +13163,13 @@ struct Fts5PhraseIter { ** Query for the details of phrase match iIdx within the current row. ** Phrase matches are numbered starting from zero, so the iIdx argument ** should be greater than or equal to zero and smaller than the value -** output by xInstCount(). +** output by xInstCount(). If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. ** -** Usually, output parameter *piPhrase is set to the phrase number, *piCol +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol ** to the column in which it occurs and *piOff the token offset of the -** first token of the phrase. Returns SQLITE_OK if successful, or an error -** code (i.e. SQLITE_NOMEM) if an error occurs. +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. ** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. @@ -13141,6 +13195,10 @@ struct Fts5PhraseIter { ** Invoking Api.xUserData() returns a copy of the pointer passed as ** the third argument to pUserData. ** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. ** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK. @@ -13255,9 +13313,42 @@ struct Fts5PhraseIter { ** ** xPhraseNextColumn() ** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. 
+**
+**   If iPhrase or iToken are less than zero, or if iPhrase is greater than
+**   or equal to the number of phrases in the query as reported by
+**   xPhraseCount(), or if iToken is equal to or greater than the number of
+**   tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken
+**   are both zeroed.
+**
+**   The output text is not a copy of the query text that specified the
+**   token. It is the output of the tokenizer module. For tokendata=1
+**   tables, this includes any embedded 0x00 and trailing data.
+**
+** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken)
+**   This is used to access token iToken of phrase hit iIdx within the
+**   current row. If iIdx is less than zero or greater than or equal to the
+**   value returned by xInstCount(), SQLITE_RANGE is returned.  Otherwise,
+**   output variable (*ppToken) is set to point to a buffer containing the
+**   matching document token, and (*pnToken) to the size of that buffer in
+**   bytes. This API is not available if the specified token matches a
+**   prefix query term. In that case both output variables are always set
+**   to 0.
+**
+**   The output text is not a copy of the document text that was tokenized.
+**   It is the output of the tokenizer module. For tokendata=1 tables, this
+**   includes any embedded 0x00 and trailing data.
+**
+**   This API can be quite slow if used with an FTS5 table created with the
+**   "detail=none" or "detail=column" option.
 */
struct Fts5ExtensionApi {
-  int iVersion;                   /* Currently always set to 2 */
+  int iVersion;                   /* Currently always set to 3 */

  void *(*xUserData)(Fts5Context*);
@@ -13292,6 +13383,13 @@ struct Fts5ExtensionApi {
  int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
  void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
+
+  /* Below this point are iVersion>=3 only */
+  int (*xQueryToken)(Fts5Context*,
+      int iPhrase, int iToken,
+      const char **ppToken, int *pnToken
+  );
+  int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*);
};

/*
@@ -13778,7 +13876,7 @@ struct fts5_api {
** max_page_count macro.
*/
#ifndef SQLITE_MAX_PAGE_COUNT
-# define SQLITE_MAX_PAGE_COUNT 1073741823
+# define SQLITE_MAX_PAGE_COUNT 0xfffffffe /* 4294967294 */
#endif

/*
@@ -13917,6 +14015,19 @@ struct fts5_api {
# undef SQLITE_USE_SEH
#endif

+/*
+** Enable SQLITE_DIRECT_OVERFLOW_READ, unless the build explicitly
+** disables it using -DSQLITE_DIRECT_OVERFLOW_READ=0
+*/
+#if defined(SQLITE_DIRECT_OVERFLOW_READ) && SQLITE_DIRECT_OVERFLOW_READ+1==1
+  /* Disable if -DSQLITE_DIRECT_OVERFLOW_READ=0 */
+# undef SQLITE_DIRECT_OVERFLOW_READ
+#else
+  /* In all other cases, enable */
+# define SQLITE_DIRECT_OVERFLOW_READ 1
+#endif
+
+
/*
** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2.
** 0 means mutexes are permanently disabled and the library is never
@@ -15799,7 +15910,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager*);
SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*);
SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*);
SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*);
-SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *);
+SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, u64*);
SQLITE_PRIVATE void sqlite3PagerClearCache(Pager*);
SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *);
@@ -16386,6 +16497,7 @@ typedef struct VdbeOpList VdbeOpList;
#define P4_INT64    (-13) /* P4 is a 64-bit signed integer */
#define P4_INTARRAY (-14) /* P4 is a vector of 32-bit integers */
#define P4_FUNCCTX  (-15) /* P4 is a pointer to an sqlite3_context object */
+#define P4_TABLEREF (-16) /* Like P4_TABLE, but reference counted */

/* Error message codes for OP_Halt */
#define P5_ConstraintNotNull 1
@@ -16608,13 +16720,15 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_Pagecount     178
#define OP_MaxPgcnt      179
#define OP_ClrSubtype    180 /* synopsis: r[P1].subtype = 0 */
-#define OP_FilterAdd     181 /* synopsis: filter(P1) += key(P3@P4) */
-#define OP_Trace         182
-#define OP_CursorHint    183
-#define OP_ReleaseReg    184 /* synopsis: release r[P1@P2] mask P3 */
-#define OP_Noop          185
-#define OP_Explain       186
-#define OP_Abortable     187
+#define OP_GetSubtype    181 /* synopsis: r[P2] = r[P1].subtype */
+#define OP_SetSubtype    182 /* synopsis: r[P2].subtype = r[P1] */
+#define OP_FilterAdd     183 /* synopsis: filter(P1) += key(P3@P4) */
+#define OP_Trace         184
+#define OP_CursorHint    185
+#define OP_ReleaseReg    186 /* synopsis: release r[P1@P2] mask P3 */
+#define OP_Noop          187
+#define OP_Explain       188
+#define OP_Abortable     189

/* Properties such as "out2" or "jump" that are specified in
** comments following the "case" for each opcode in the vdbe.c
@@ -16650,8 +16764,8 @@ typedef struct VdbeOpList VdbeOpList;
/* 152 */ 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00, 0x04,\
/* 160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x50,\
-/* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00,\
-/* 184 */ 0x00, 0x00, 0x00, 0x00,}
+/* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x12, 0x12, 0x00,\
+/* 184 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,}

/* The resolve3P2Values() routine is able to run faster if it knows
** the value of the largest JUMP opcode.  The smaller the maximum
@@ -17812,14 +17926,15 @@ struct FuncDestructor {
#define SQLITE_FUNC_SLOCHNG  0x2000 /* "Slow Change".
Value constant during a ** single query - might change over time */ #define SQLITE_FUNC_TEST 0x4000 /* Built-in testing functions */ -/* 0x8000 -- available for reuse */ +#define SQLITE_FUNC_RUNONLY 0x8000 /* Cannot be used by valueFromFunction */ #define SQLITE_FUNC_WINDOW 0x00010000 /* Built-in window-only function */ #define SQLITE_FUNC_INTERNAL 0x00040000 /* For use by NestedParse() only */ #define SQLITE_FUNC_DIRECT 0x00080000 /* Not for use in TRIGGERs or VIEWs */ -#define SQLITE_FUNC_SUBTYPE 0x00100000 /* Result likely to have sub-type */ +/* SQLITE_SUBTYPE 0x00100000 // Consumer of subtypes */ #define SQLITE_FUNC_UNSAFE 0x00200000 /* Function has side effects */ #define SQLITE_FUNC_INLINE 0x00400000 /* Functions implemented in-line */ #define SQLITE_FUNC_BUILTIN 0x00800000 /* This is a built-in function */ +/* SQLITE_RESULT_SUBTYPE 0x01000000 // Generator of subtypes */ #define SQLITE_FUNC_ANYORDER 0x08000000 /* count/min/max aggregate */ /* Identifier numbers for each in-line function */ @@ -17911,10 +18026,11 @@ struct FuncDestructor { #define MFUNCTION(zName, nArg, xPtr, xFunc) \ {nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ xPtr, 0, xFunc, 0, 0, 0, #zName, {0} } -#define JFUNCTION(zName, nArg, iArg, xFunc) \ - {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|\ - SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ - SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } +#define JFUNCTION(zName, nArg, bUseCache, bWS, bRS, bJsonB, iArg, xFunc) \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_FUNC_CONSTANT|\ + SQLITE_UTF8|((bUseCache)*SQLITE_FUNC_RUNONLY)|\ + ((bRS)*SQLITE_SUBTYPE)|((bWS)*SQLITE_RESULT_SUBTYPE), \ + SQLITE_INT_TO_PTR(iArg|((bJsonB)*JSON_BLOB)),0,xFunc,0, 0, 0, #zName, {0} } #define INLINE_FUNC(zName, nArg, iArg, mFlags) \ {nArg, SQLITE_FUNC_BUILTIN|\ SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \ @@ -18549,6 +18665,7 @@ struct Index { unsigned isCovering:1; /* True if this is a covering index */ unsigned noSkipScan:1; /* Do not try to use skip-scan if true */ unsigned hasStat1:1; /* aiRowLogEst values come from sqlite_stat1 */ + unsigned bLowQual:1; /* sqlite_stat1 says this is a low-quality index */ unsigned bNoQuery:1; /* Do not use this index to optimize queries */ unsigned bAscKeyBug:1; /* True if the bba7b69f9849b5bf bug applies */ unsigned bHasVCol:1; /* Index references one or more VIRTUAL columns */ @@ -18662,6 +18779,7 @@ struct AggInfo { int iOBTab; /* Ephemeral table to implement ORDER BY */ u8 bOBPayload; /* iOBTab has payload columns separate from key */ u8 bOBUnique; /* Enforce uniqueness on iOBTab keys */ + u8 bUseSubtype; /* Transfer subtype info through sorter */ } *aFunc; int nFunc; /* Number of entries in aFunc[] */ u32 selId; /* Select to which this AggInfo belongs */ @@ -19195,6 +19313,7 @@ struct NameContext { int nRef; /* Number of names resolved by this context */ int nNcErr; /* Number of errors encountered while resolving names */ int ncFlags; /* Zero or more NC_* flags defined below */ + u32 nNestedSelect; /* Number of nested selects using this NC */ Select *pWinSelect; /* SELECT statement for any window functions */ }; @@ -19911,6 +20030,9 @@ struct sqlite3_str { ** ** 3. Make a (read-only) copy of a read-only RCStr string using ** sqlite3RCStrRef(). +** +** "String" is in the name, but an RCStr object can also be used to hold +** binary data. 
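**
** A minimal sketch of the lifecycle, using the reference routine named
** above plus the matching allocator and release routines (sqlite3RCStrNew()
** and sqlite3RCStrUnref() are assumed here; they do not appear in this
** hunk):
**
**      char *z  = sqlite3RCStrNew(100);
**      char *z2 = sqlite3RCStrRef(z);
**      sqlite3RCStrUnref(z2);
**      sqlite3RCStrUnref(z);
**
** The first Unref drops the reference count from 2 back to 1; the second
** releases the last reference and frees the buffer.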
*/ struct RCStr { u64 nRCRef; /* Number of references */ @@ -19969,6 +20091,9 @@ struct Sqlite3Config { u8 bSmallMalloc; /* Avoid large memory allocations if true */ u8 bExtraSchemaChecks; /* Verify type,name,tbl_name in schema */ u8 bUseLongDouble; /* Make use of long double */ +#ifdef SQLITE_DEBUG + u8 bJsonSelfcheck; /* Double-check JSON parsing */ +#endif int mxStrlen; /* Maximum string length */ int neverCorrupt; /* Database is always well-formed */ int szLookaside; /* Default lookaside buffer size */ @@ -20595,6 +20720,7 @@ SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse*,Expr*); SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*); SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); +SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*); SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); @@ -20604,6 +20730,7 @@ SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int,int); SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,const Token*,int); SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,const char*,const char*); SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*); +SQLITE_PRIVATE void sqlite3ExprListDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*); SQLITE_PRIVATE int sqlite3IndexHasDuplicateRootPage(Index*); SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**); @@ -20694,6 +20821,7 @@ SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask); SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int); SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int); SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*); +SQLITE_PRIVATE void sqlite3DeleteTableGeneric(sqlite3*, void*); SQLITE_PRIVATE void sqlite3FreeIndex(sqlite3*, Index*); #ifndef SQLITE_OMIT_AUTOINCREMENT SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse); @@ -20730,6 +20858,7 @@ SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*); SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*, Expr*,ExprList*,u32,Expr*); SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*); +SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*); SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, Trigger*); SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); @@ -20956,6 +21085,7 @@ SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar); #endif SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte); SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**); +SQLITE_PRIVATE int sqlite3Utf8ReadLimited(const u8*, int, u32*); SQLITE_PRIVATE LogEst sqlite3LogEst(u64); SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst); SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double); @@ -21302,6 +21432,7 @@ SQLITE_PRIVATE Cte *sqlite3CteNew(Parse*,Token*,ExprList*,Select*,u8); SQLITE_PRIVATE void sqlite3CteDelete(sqlite3*,Cte*); SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Cte*); SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*); +SQLITE_PRIVATE void sqlite3WithDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE With *sqlite3WithPush(Parse*, With*, u8); #else # define sqlite3CteNew(P,T,E,S) ((void*)0) @@ -22679,6 +22810,9 @@ SQLITE_PRIVATE SQLITE_WSD 
struct Sqlite3Config sqlite3Config = {
   0,                         /* bSmallMalloc */
   1,                         /* bExtraSchemaChecks */
   sizeof(LONGDOUBLE_TYPE)>8, /* bUseLongDouble */
+#ifdef SQLITE_DEBUG
+   0,                         /* bJsonSelfcheck */
+#endif
   0x7ffffffe,                /* mxStrlen */
   0,                         /* neverCorrupt */
   SQLITE_DEFAULT_LOOKASIDE,  /* szLookaside, nLookaside */
@@ -23931,7 +24065,7 @@ SQLITE_API int sqlite3_db_status(
    case SQLITE_DBSTATUS_CACHE_MISS:
    case SQLITE_DBSTATUS_CACHE_WRITE:{
      int i;
-     int nRet = 0;
+     u64 nRet = 0;
      assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 );
      assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 );
@@ -23944,7 +24078,7 @@ SQLITE_API int sqlite3_db_status(
      *pHighwater = 0; /* IMP: R-42420-56072 */
                       /* IMP: R-54100-20147 */
                       /* IMP: R-29431-39229 */
-     *pCurrent = nRet;
+     *pCurrent = (int)nRet & 0x7fffffff;
      break;
    }
@@ -25013,6 +25147,12 @@ static int isDate(
  }
  computeJD(p);
  if( p->isError || !validJulianDay(p->iJD) ) return 1;
+  if( argc==1 && p->validYMD && p->D>28 ){
+    /* Make sure a YYYY-MM-DD is normalized.
+    ** Example: 2023-02-31 -> 2023-03-03 */
+    assert( p->validJD );
+    p->validYMD = 0;
+  }
  return 0;
}
@@ -29454,7 +29594,7 @@ SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
  SQLITE_MEMORY_BARRIER;
#elif defined(__GNUC__)
  __sync_synchronize();
-#elif MSVC_VERSION>=1300
+#elif MSVC_VERSION>=1400
  _ReadWriteBarrier();
#elif defined(MemoryBarrier)
  MemoryBarrier();
@@ -32041,7 +32181,7 @@ SQLITE_API void sqlite3_str_appendf(StrAccum *p, const char *zFormat, ...){

/*****************************************************************************
-** Reference counted string storage
+** Reference counted string/blob storage
*****************************************************************************/

/*
@@ -32893,7 +33033,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
      assert( pExpr->x.pList->nExpr==2 );
      pY = pExpr->x.pList->a[0].pExpr;
      pZ = pExpr->x.pList->a[1].pExpr;
-     sqlite3TreeViewLine(pView, "BETWEEN");
+     sqlite3TreeViewLine(pView, "BETWEEN%s", zFlgs);
      sqlite3TreeViewExpr(pView, pX, 1);
      sqlite3TreeViewExpr(pView, pY, 1);
      sqlite3TreeViewExpr(pView, pZ, 0);
@@ -34028,7 +34168,38 @@ SQLITE_PRIVATE u32 sqlite3Utf8Read(
  return c;
}

-
+/*
+** Read a single UTF8 character out of buffer z[], but reading no
+** more than n bytes from the buffer.  z[] is not zero-terminated.
+**
+** Return the number of bytes used to construct the character.
+**
+** Invalid UTF8 might generate a strange result.  No effort is made
+** to detect invalid UTF8.
+**
+** At most 4 bytes will be read out of z[].  The return value will always
+** be between 1 and 4.
+*/
+SQLITE_PRIVATE int sqlite3Utf8ReadLimited(
+  const u8 *z,
+  int n,
+  u32 *piOut
+){
+  u32 c;
+  int i = 1;
+  assert( n>0 );
+  c = z[0];
+  if( c>=0xc0 ){
+    c = sqlite3Utf8Trans1[c-0xc0];
+    if( n>4 ) n = 4;
+    while( i<n && (z[i] & 0xc0)==0x80 ){
+      c = (c<<6) + (0x3f & z[i]);
+      i++;
+    }
+  }
+  *piOut = c;
+  return i;
+}
@@ ... @@
    case SQLITE_FCNTL_LOCK_TIMEOUT: {
      int iOld = pFile->iBusyTimeout;
+#if SQLITE_ENABLE_SETLK_TIMEOUT==1
      pFile->iBusyTimeout = *(int*)pArg;
+#elif SQLITE_ENABLE_SETLK_TIMEOUT==2
+      pFile->iBusyTimeout = !!(*(int*)pArg);
+#else
+# error "SQLITE_ENABLE_SETLK_TIMEOUT must be set to 1 or 2"
+#endif
      *(int*)pArg = iOld;
      return SQLITE_OK;
    }
@@ -42102,6 +42281,25 @@ static int unixGetpagesize(void){
** Either unixShmNode.pShmMutex must be held or unixShmNode.nRef==0 and
** unixMutexHeld() is true when reading or writing any other field
** in this structure.
+**
+** aLock[SQLITE_SHM_NLOCK]:
+**   This array records the various locks held by clients on each of the
+**   SQLITE_SHM_NLOCK slots. If the aLock[] entry is set to 0, then no
+**   locks are held by the process on this slot. If it is set to -1, then
+**   some client holds an EXCLUSIVE lock on the locking slot. If the aLock[]
+**   value is set to a positive value, then it is the number of shared
+**   locks currently held on the slot.
+**
+** aMutex[SQLITE_SHM_NLOCK]:
+**   Normally, when SQLITE_ENABLE_SETLK_TIMEOUT is not defined, mutex
+**   pShmMutex is used to protect the aLock[] array and the right to
+**   call fcntl() on unixShmNode.hShm to obtain or release locks.
+**
+**   If SQLITE_ENABLE_SETLK_TIMEOUT is defined though, we use an array
+**   of mutexes - one for each locking slot. To read or write locking
+**   slot aLock[iSlot], the caller must hold the corresponding mutex
+**   aMutex[iSlot]. Similarly, to call fcntl() to obtain or release a
+**   lock corresponding to slot iSlot, mutex aMutex[iSlot] must be held.
*/
struct unixShmNode {
  unixInodeInfo *pInode;     /* unixInodeInfo that owns this SHM node */
@@ -42115,10 +42313,11 @@ struct unixShmNode {
  char **apRegion;           /* Array of mapped shared-memory regions */
  int nRef;                  /* Number of unixShm objects pointing to this */
  unixShm *pFirst;           /* All unixShm objects pointing to this */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+  sqlite3_mutex *aMutex[SQLITE_SHM_NLOCK];
+#endif
  int aLock[SQLITE_SHM_NLOCK];  /* # shared locks on slot, -1==excl lock */
#ifdef SQLITE_DEBUG
-  u8 exclMask;               /* Mask of exclusive locks held */
-  u8 sharedMask;             /* Mask of shared locks held */
  u8 nextShmId;              /* Next available unixShm.id value */
#endif
};
@@ -42201,16 +42400,35 @@ static int unixShmSystemLock(
  struct flock f;       /* The posix advisory locking structure */
  int rc = SQLITE_OK;   /* Result code from fcntl() */

-  /* Access to the unixShmNode object is serialized by the caller */
  pShmNode = pFile->pInode->pShmNode;
-  assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) );
-  assert( pShmNode->nRef>0 || unixMutexHeld() );
+
+  /* Assert that the parameters are within expected range and that the
+  ** correct mutex or mutexes are held. */
+  assert( pShmNode->nRef>=0 );
+  assert( (ofst==UNIX_SHM_DMS && n==1)
+       || (ofst>=UNIX_SHM_BASE && ofst+n<=(UNIX_SHM_BASE+SQLITE_SHM_NLOCK))
+  );
+  if( ofst==UNIX_SHM_DMS ){
+    assert( pShmNode->nRef>0 || unixMutexHeld() );
+    assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) );
+  }else{
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+    int ii;
+    for(ii=ofst-UNIX_SHM_BASE; ii<ofst-UNIX_SHM_BASE+n; ii++){
+      assert( sqlite3_mutex_held(pShmNode->aMutex[ii]) );
+    }
+#else
+    assert( sqlite3_mutex_held(pShmNode->pShmMutex) );
+    assert( pShmNode->nRef>0 );
+#endif
+  }

  /* Shared locks never span more than one byte */
  assert( n==1 || lockType!=F_RDLCK );

  /* Locks are within range */
  assert( n>=1 && n<=SQLITE_SHM_NLOCK );
+  assert( ofst>=UNIX_SHM_BASE && ofst<=(UNIX_SHM_DMS+SQLITE_SHM_NLOCK) );

  if( pShmNode->hShm>=0 ){
    int res;
@@ -42221,7 +42439,7 @@ static int unixShmSystemLock(
    f.l_len = n;
    res = osSetPosixAdvisoryLock(pShmNode->hShm, &f, pFile);
    if( res==-1 ){
-#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+#if defined(SQLITE_ENABLE_SETLK_TIMEOUT) && SQLITE_ENABLE_SETLK_TIMEOUT==1
      rc = (pFile->iBusyTimeout ? SQLITE_BUSY_TIMEOUT : SQLITE_BUSY);
#else
      rc = SQLITE_BUSY;
@@ -42229,39 +42447,28 @@ static int unixShmSystemLock(
    }
  }

-  /* Update the global lock state and do debug tracing */
+  /* Do debug tracing */
#ifdef SQLITE_DEBUG
-  { u16 mask;
    OSTRACE(("SHM-LOCK "));
-    mask = ofst>31 ? 0xffff : (1<<(ofst+n)) - (1<<ofst);
    if( rc==SQLITE_OK ){
      if( lockType==F_UNLCK ){
-        OSTRACE(("unlock %d ok", ofst));
-        pShmNode->exclMask &= ~mask;
-        pShmNode->sharedMask &= ~mask;
+        OSTRACE(("unlock %d..%d ok\n", ofst, ofst+n-1));
      }else if( lockType==F_RDLCK ){
-        OSTRACE(("read-lock %d ok", ofst));
-        pShmNode->exclMask &= ~mask;
-        pShmNode->sharedMask |= mask;
+        OSTRACE(("read-lock %d..%d ok\n", ofst, ofst+n-1));
      }else{
        assert( lockType==F_WRLCK );
-        OSTRACE(("write-lock %d ok", ofst));
-        pShmNode->exclMask |= mask;
-        pShmNode->sharedMask &= ~mask;
+        OSTRACE(("write-lock %d..%d ok\n", ofst, ofst+n-1));
      }
    }else{
      if( lockType==F_UNLCK ){
-        OSTRACE(("unlock %d failed", ofst));
+        OSTRACE(("unlock %d..%d failed\n", ofst, ofst+n-1));
      }else if( lockType==F_RDLCK ){
-        OSTRACE(("read-lock failed"));
+        OSTRACE(("read-lock %d..%d failed\n", ofst, ofst+n-1));
      }else{
        assert( lockType==F_WRLCK );
-        OSTRACE(("write-lock %d failed", ofst));
+        OSTRACE(("write-lock %d..%d failed\n", ofst, ofst+n-1));
      }
    }
-    OSTRACE((" - afterwards %03x,%03x\n",
-             pShmNode->sharedMask, pShmNode->exclMask));
-  }
#endif

  return rc;
@@ -42298,6 +42505,11 @@ static void unixShmPurge(unixFile *pFd){
    int i;
    assert( p->pInode==pFd->pInode );
    sqlite3_mutex_free(p->pShmMutex);
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+    for(i=0; i<SQLITE_SHM_NLOCK; i++){
+      sqlite3_mutex_free(p->aMutex[i]);
+    }
+#endif
    for(i=0; i<p->nRegion; i+=nShmPerMap){
      if( p->hShm>=0 ){
        osMunmap(p->apRegion[i], p->szRegion);
@@ -42357,7 +42569,20 @@ static int unixLockSharedMemory(unixFile *pDbFd, unixShmNode *pShmNode){
      pShmNode->isUnlocked = 1;
      rc = SQLITE_READONLY_CANTINIT;
    }else{
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+      /* Do not use a blocking lock here. If the lock cannot be obtained
+      ** immediately, it means some other connection is truncating the
+      ** *-shm file. And after it has done so, it will not release its
+      ** lock, but only downgrade it to a shared lock. So no point in
+      ** blocking here. The call below to obtain the shared DMS lock may
+      ** use a blocking lock. */
+      int iSaveTimeout = pDbFd->iBusyTimeout;
+      pDbFd->iBusyTimeout = 0;
+#endif
      rc = unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_DMS, 1);
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+      pDbFd->iBusyTimeout = iSaveTimeout;
+#endif
      /* The first connection to attach must truncate the -shm file.  We
      ** truncate to 3 bytes (an arbitrary small number, less than the
      ** -shm header size) rather than 0 as a system debugging aid, to
@@ -42478,6 +42703,18 @@ static int unixOpenSharedMemory(unixFile *pDbFd){
      rc = SQLITE_NOMEM_BKPT;
      goto shm_open_err;
    }
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+    {
+      int ii;
+      for(ii=0; ii<SQLITE_SHM_NLOCK; ii++){
+        pShmNode->aMutex[ii] = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+        if( pShmNode->aMutex[ii]==0 ){
+          rc = SQLITE_NOMEM_BKPT;
+          goto shm_open_err;
+        }
+      }
+    }
+#endif
  }

  if( pInode->bProcessLock==0 ){
@@ -42699,9 +42936,11 @@ static int unixShmMap(
*/
#ifdef SQLITE_DEBUG
static int assertLockingArrayOk(unixShmNode *pShmNode){
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+  return 1;
+#else
  unixShm *pX;
  int aLock[SQLITE_SHM_NLOCK];
-  assert( sqlite3_mutex_held(pShmNode->pShmMutex) );

  memset(aLock, 0, sizeof(aLock));
  for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
@@ -42719,13 +42958,14 @@ static int assertLockingArrayOk(unixShmNode *pShmNode){

  assert( 0==memcmp(pShmNode->aLock, aLock, sizeof(aLock)) );
  return (memcmp(pShmNode->aLock, aLock, sizeof(aLock))==0);
+#endif
}
#endif

/*
** Change the lock state for a shared-memory segment.
**
-** Note that the relationship between SHAREd and EXCLUSIVE locks is a little
+** Note that the relationship between SHARED and EXCLUSIVE locks is a little
** different here than in posix.  In xShmLock(), one can go from unlocked
** to shared and back or from unlocked to exclusive and back.  But one may
** not go from shared to exclusive or from exclusive to shared.
@@ -42740,7 +42980,7 @@ static int unixShmLock(
  unixShm *p;                           /* The shared memory being locked */
  unixShmNode *pShmNode;                /* The underlying file iNode */
  int rc = SQLITE_OK;                   /* Result code */
-  u16 mask;                             /* Mask of locks to take or release */
+  u16 mask = (1<<(ofst+n)) - (1<<ofst); /* Mask of locks to take or release */
  int *aLock;

  p = pDbFd->pShm;
@@ -42775,88 +43015,151 @@ static int unixShmLock(
  ** It is not permitted to block on the RECOVER lock.
  */
#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
-  assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || (
-        (ofst!=2)                                   /* not RECOVER */
-     && (ofst!=1 || (p->exclMask|p->sharedMask)==0)
-     && (ofst!=0 || (p->exclMask|p->sharedMask)<3)
-     && (ofst<3 || (p->exclMask|p->sharedMask)<(1<<ofst))
-  ));
+  {
+    u16 lockMask = (p->exclMask|p->sharedMask);
+    assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || (
+          (ofst!=2)                                   /* not RECOVER */
+       && (ofst!=1 || lockMask==0 || lockMask==2)
+       && (ofst!=0 || lockMask<3)
+       && (ofst<3  || lockMask<(1<<ofst))
+    ));
+  }
#endif

-  mask = (1<<(ofst+n)) - (1<<ofst);
  assert( n>1 || mask==(1<<ofst) );
-  sqlite3_mutex_enter(pShmNode->pShmMutex);
-  assert( assertLockingArrayOk(pShmNode) );
-  if( flags & SQLITE_SHM_UNLOCK ){
-    if( (p->exclMask|p->sharedMask) & mask ){
-      int ii;
-      int bUnlock = 1;

+  /* Check if there is any work to do. There are three cases:
+  **
+  **   a) An unlock operation where there are locks to unlock,
+  **   b) A shared lock where the requested lock is not already held
+  **   c) An exclusive lock where the requested lock is not already held
+  **
+  ** The SQLite core never requests an exclusive lock that it already holds.
+  ** This is assert()ed below.
+  */
+  assert( flags!=(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK)
       || 0==(p->exclMask & mask)
+  );
+  if( ((flags & SQLITE_SHM_UNLOCK) && ((p->exclMask|p->sharedMask) & mask))
+   || (flags==(SQLITE_SHM_SHARED|SQLITE_SHM_LOCK) && 0==(p->sharedMask & mask))
+   || (flags==(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK))
  ){
-      for(ii=ofst; ii<ofst+n; ii++){
-        if( aLock[ii]>((p->sharedMask & (1<<ii)) ? 1 : 0) ){
-          bUnlock = 0;
-        }
-      }
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+    int iMutex;
+    for(iMutex=ofst; iMutex<ofst+n; iMutex++){
+      if( flags==(SQLITE_SHM_LOCK|SQLITE_SHM_EXCLUSIVE) ){
+        rc = sqlite3_mutex_try(pShmNode->aMutex[iMutex]);
+        if( rc!=SQLITE_OK ) goto leave_shmnode_mutexes;
+      }else{
+        sqlite3_mutex_enter(pShmNode->aMutex[iMutex]);
      }
    }
+#else
+    sqlite3_mutex_enter(pShmNode->pShmMutex);
+#endif

+    if( ALWAYS(rc==SQLITE_OK) ){
+      if( flags & SQLITE_SHM_UNLOCK ){
+        /* Case (a) - unlock.  */
+        int bUnlock = 1;
+        assert( (p->exclMask & p->sharedMask)==0 );
+        assert( !(flags & SQLITE_SHM_EXCLUSIVE) || (p->exclMask & mask)==mask );
+        assert( !(flags & SQLITE_SHM_SHARED) || (p->sharedMask & mask)==mask );

+        /* If this is a SHARED lock being unlocked, it is possible that other
+        ** clients within this process are holding the same SHARED lock. In
+        ** this case, set bUnlock to 0 so that the posix lock is not removed
+        ** from the file-descriptor below.
+        */
+        if( flags & SQLITE_SHM_SHARED ){
+          assert( n==1 );
+          assert( aLock[ofst]>=1 );
+          if( aLock[ofst]>1 ){
+            bUnlock = 0;
+            aLock[ofst]--;
+            p->sharedMask &= ~mask;
+          }
+        }

-      if( bUnlock ){
-        rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n);
-        if( rc==SQLITE_OK ){
-          memset(&aLock[ofst], 0, sizeof(int)*n);
-        }
-      }else if( ALWAYS(p->sharedMask & (1<<ofst)) ){
-        assert( n==1 );
-        assert( aLock[ofst]>1 );
-        aLock[ofst]--;
-      }
-
-      /* Undo the local locks */
-      if( rc==SQLITE_OK ){
-        p->exclMask &= ~mask;
-        p->sharedMask &= ~mask;
-      }
-    }
-  }else if( flags & SQLITE_SHM_SHARED ){
-    assert( n==1 );
-    assert( (p->exclMask & (1<<ofst))==0 );
-    if( (p->sharedMask & mask)==0 ){
-      if( aLock[ofst]<0 ){
-        rc = SQLITE_BUSY;
-      }else if( aLock[ofst]==0 ){
-        rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n);
-      }
+        if( bUnlock ){
+          rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n);
+          if( rc==SQLITE_OK ){
+            memset(&aLock[ofst], 0, sizeof(int)*n);
+            p->sharedMask &= ~mask;
+            p->exclMask &= ~mask;
+          }
+        }
+      }else if( flags & SQLITE_SHM_SHARED ){
+        /* Case (b) - a shared lock.  */

-      /* Get the local shared locks */
-      if( rc==SQLITE_OK ){
-        p->sharedMask |= mask;
-        aLock[ofst]++;
-      }
-    }
-  }else{
-    /* Make sure no sibling connections hold locks that will block this
-    ** lock. If any do, return SQLITE_BUSY right away. */
-    int ii;
-    for(ii=ofst; ii<ofst+n; ii++){
-      assert( (p->sharedMask & mask)==0 );
-      if( ALWAYS((p->exclMask & (1<<ii))==0) && aLock[ii] ){
+        if( aLock[ofst]<0 ){
+          rc = SQLITE_BUSY;
+        }else if( aLock[ofst]==0 ){
+          rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n);
+        }
+
+        /* Get the local shared locks */
+        if( rc==SQLITE_OK ){
+          p->sharedMask |= mask;
+          aLock[ofst]++;
+        }
+      }else{
+        /* Case (c) - an exclusive lock.  */
+        int ii;
+
+        assert( flags==(SQLITE_SHM_LOCK|SQLITE_SHM_EXCLUSIVE) );
        assert( (p->sharedMask & mask)==0 );
-        p->exclMask |= mask;
+        assert( (p->exclMask & mask)==0 );
+
+        /* Make sure no sibling connections hold locks that will block this
+        ** lock. If any do, return SQLITE_BUSY right away. */
        for(ii=ofst; ii<ofst+n; ii++){
+          if( aLock[ii] ){
+            rc = SQLITE_BUSY;
+            break;
+          }
+        }
+
+        /* Get the exclusive locks at the system level. Then if successful
+        ** also update the in-memory values. */
+        if( rc==SQLITE_OK ){
+          rc = unixShmSystemLock(pDbFd, F_WRLCK, ofst+UNIX_SHM_BASE, n);
+          if( rc==SQLITE_OK ){
+            p->exclMask |= mask;
+            for(ii=ofst; ii<ofst+n; ii++) aLock[ii] = -1;
+          }
+        }
      }
+
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+   leave_shmnode_mutexes:
+    for(iMutex--; iMutex>=ofst; iMutex--){
+      sqlite3_mutex_leave(pShmNode->aMutex[iMutex]);
+    }
+#else
+    sqlite3_mutex_leave(pShmNode->pShmMutex);
+#endif
  }
-  assert( assertLockingArrayOk(pShmNode) );
-  sqlite3_mutex_leave(pShmNode->pShmMutex);
+
  OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n", p->id, osGetpid(0),
           p->sharedMask, p->exclMask));
  return rc;
@@ -43106,11 +43409,16 @@ static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){

#if SQLITE_MAX_MMAP_SIZE>0
  if( pFd->mmapSizeMax>0 ){
+    /* Ensure that there is always at least a 256 byte buffer of addressable
+    ** memory following the returned page. If the database is corrupt,
+    ** SQLite may overread the page slightly (in practice only a few bytes,
+    ** but 256 is a safe, round number). */
+    const int nEofBuffer = 256;
    if( pFd->pMapRegion==0 ){
      int rc = unixMapfile(pFd, -1);
      if( rc!=SQLITE_OK ) return rc;
    }
-    if( pFd->mmapSize >= iOff+nAmt ){
+    if( pFd->mmapSize >= (iOff+nAmt+nEofBuffer) ){
      *pp = &((u8 *)pFd->pMapRegion)[iOff];
      pFd->nFetchOut++;
    }
@@ -50463,6 +50771,11 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){

#if SQLITE_MAX_MMAP_SIZE>0
  if( pFd->mmapSizeMax>0 ){
+    /* Ensure that there is always at least a 256 byte buffer of addressable
+    ** memory following the returned page. If the database is corrupt,
+    ** SQLite may overread the page slightly (in practice only a few bytes,
+    ** but 256 is a safe, round number).
*/ + const int nEofBuffer = 256; if( pFd->pMapRegion==0 ){ int rc = winMapfile(pFd, -1); if( rc!=SQLITE_OK ){ @@ -50471,7 +50784,7 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ return rc; } } - if( pFd->mmapSize >= iOff+nAmt ){ + if( pFd->mmapSize >= (iOff+nAmt+nEofBuffer) ){ assert( pFd->pMapRegion!=0 ); *pp = &((u8 *)pFd->pMapRegion)[iOff]; pFd->nFetchOut++; @@ -57074,7 +57387,7 @@ struct Pager { char *zJournal; /* Name of the journal file */ int (*xBusyHandler)(void*); /* Function to call when busy */ void *pBusyHandlerArg; /* Context argument for xBusyHandler */ - int aStat[4]; /* Total cache hits, misses, writes, spills */ + u32 aStat[4]; /* Total cache hits, misses, writes, spills */ #ifdef SQLITE_TEST int nRead; /* Database pages read */ #endif @@ -57204,9 +57517,8 @@ SQLITE_PRIVATE int sqlite3PagerDirectReadOk(Pager *pPager, Pgno pgno){ #ifndef SQLITE_OMIT_WAL if( pPager->pWal ){ u32 iRead = 0; - int rc; - rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iRead); - return (rc==SQLITE_OK && iRead==0); + (void)sqlite3WalFindFrame(pPager->pWal, pgno, &iRead); + return iRead==0; } #endif return 1; @@ -61448,10 +61760,13 @@ SQLITE_PRIVATE int sqlite3PagerOpen( */ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char *zName){ Pager *pPager; + const char *p; while( zName[-1]!=0 || zName[-2]!=0 || zName[-3]!=0 || zName[-4]!=0 ){ zName--; } - pPager = *(Pager**)(zName - 4 - sizeof(Pager*)); + p = zName - 4 - sizeof(Pager*); + assert( EIGHT_BYTE_ALIGNMENT(p) ); + pPager = *(Pager**)p; return pPager->fd; } @@ -63215,11 +63530,11 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){ a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize; a[4] = pPager->eState; a[5] = pPager->errCode; - a[6] = pPager->aStat[PAGER_STAT_HIT]; - a[7] = pPager->aStat[PAGER_STAT_MISS]; + a[6] = (int)pPager->aStat[PAGER_STAT_HIT] & 0x7fffffff; + a[7] = (int)pPager->aStat[PAGER_STAT_MISS] & 0x7fffffff; a[8] = 0; /* Used to be pPager->nOvfl */ a[9] = pPager->nRead; - a[10] = pPager->aStat[PAGER_STAT_WRITE]; + a[10] = (int)pPager->aStat[PAGER_STAT_WRITE] & 0x7fffffff; return a; } #endif @@ -63235,7 +63550,7 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){ ** reset parameter is non-zero, the cache hit or miss count is zeroed before ** returning. */ -SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){ +SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, u64 *pnVal){ assert( eStat==SQLITE_DBSTATUS_CACHE_HIT || eStat==SQLITE_DBSTATUS_CACHE_MISS @@ -64175,7 +64490,7 @@ SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){ } #endif -#ifdef SQLITE_USE_SEH +#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL) SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager *pPager){ return sqlite3WalSystemErrno(pPager->pWal); } @@ -66191,6 +66506,19 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){ } #ifdef SQLITE_ENABLE_SETLK_TIMEOUT + + +/* +** Attempt to enable blocking locks that block for nMs ms. Return 1 if +** blocking locks are successfully enabled, or 0 otherwise. +*/ +static int walEnableBlockingMs(Wal *pWal, int nMs){ + int rc = sqlite3OsFileControl( + pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&nMs + ); + return (rc==SQLITE_OK); +} + /* ** Attempt to enable blocking locks. 
Blocking locks are enabled only if (a) ** they are supported by the VFS, and (b) the database handle is configured @@ -66202,11 +66530,7 @@ static int walEnableBlocking(Wal *pWal){ if( pWal->db ){ int tmout = pWal->db->busyTimeout; if( tmout ){ - int rc; - rc = sqlite3OsFileControl( - pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&tmout - ); - res = (rc==SQLITE_OK); + res = walEnableBlockingMs(pWal, tmout); } } return res; @@ -66255,20 +66579,10 @@ SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db){ pWal->db = db; } -/* -** Take an exclusive WRITE lock. Blocking if so configured. -*/ -static int walLockWriter(Wal *pWal){ - int rc; - walEnableBlocking(pWal); - rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1); - walDisableBlocking(pWal); - return rc; -} #else # define walEnableBlocking(x) 0 # define walDisableBlocking(x) -# define walLockWriter(pWal) walLockExclusive((pWal), WAL_WRITE_LOCK, 1) +# define walEnableBlockingMs(pWal, ms) 0 # define sqlite3WalDb(pWal, db) #endif /* ifdef SQLITE_ENABLE_SETLK_TIMEOUT */ @@ -66869,7 +67183,9 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){ } }else{ int bWriteLock = pWal->writeLock; - if( bWriteLock || SQLITE_OK==(rc = walLockWriter(pWal)) ){ + if( bWriteLock + || SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1)) + ){ pWal->writeLock = 1; if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){ badHdr = walIndexTryHdr(pWal, pChanged); @@ -66877,7 +67193,8 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){ /* If the wal-index header is still malformed even while holding ** a WRITE lock, it can only mean that the header is corrupted and ** needs to be reconstructed. So run recovery to do exactly that. - */ + ** Disable blocking locks first. */ + walDisableBlocking(pWal); rc = walIndexRecover(pWal); *pChanged = 1; } @@ -67087,6 +67404,37 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){ return rc; } +/* +** The final argument passed to walTryBeginRead() is of type (int*). The +** caller should invoke walTryBeginRead as follows: +** +** int cnt = 0; +** do { +** rc = walTryBeginRead(..., &cnt); +** }while( rc==WAL_RETRY ); +** +** The final value of "cnt" is of no use to the caller. It is used by +** the implementation of walTryBeginRead() as follows: +** +** + Each time walTryBeginRead() is called, it is incremented. Once +** it reaches WAL_RETRY_PROTOCOL_LIMIT - indicating that walTryBeginRead() +** has many times been invoked and failed with WAL_RETRY - walTryBeginRead() +** returns SQLITE_PROTOCOL. +** +** + If SQLITE_ENABLE_SETLK_TIMEOUT is defined and walTryBeginRead() failed +** because a blocking lock timed out (SQLITE_BUSY_TIMEOUT from the OS +** layer), the WAL_RETRY_BLOCKED_MASK bit is set in "cnt". In this case +** the next invocation of walTryBeginRead() may omit an expected call to +** sqlite3OsSleep(). There has already been a delay when the previous call +** waited on a lock. +*/ +#define WAL_RETRY_PROTOCOL_LIMIT 100 +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT +# define WAL_RETRY_BLOCKED_MASK 0x10000000 +#else +# define WAL_RETRY_BLOCKED_MASK 0 +#endif + /* ** Attempt to start a read transaction. This might fail due to a race or ** other transient condition. When that happens, it returns WAL_RETRY to @@ -67137,13 +67485,16 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){ ** so it takes care to hold an exclusive lock on the corresponding ** WAL_READ_LOCK() while changing values. 
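**
** (An illustration of the blocking-lock plumbing above, not upstream
** text: in a build compiled with -DSQLITE_ENABLE_SETLK_TIMEOUT, the
** walEnableBlocking()/walEnableBlockingMs() machinery is driven by the
** connection's ordinary busy-timeout, so an application opts in with
** nothing more than:
**
**      sqlite3_busy_timeout(db, 1000);
**
** after which the WAL locks taken here may block for up to roughly one
** second instead of failing immediately. Without that build option the
** walEnableBlockingMs() macro expands to 0 and every lock attempt stays
** non-blocking.)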
*/ -static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ +static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */ u32 mxReadMark; /* Largest aReadMark[] value */ int mxI; /* Index of largest aReadMark[] value */ int i; /* Loop counter */ int rc = SQLITE_OK; /* Return code */ u32 mxFrame; /* Wal frame to lock to */ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + int nBlockTmout = 0; +#endif assert( pWal->readLock<0 ); /* Not currently locked */ @@ -67167,14 +67518,34 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ ** so that on the 100th (and last) RETRY we delay for 323 milliseconds. ** The total delay time before giving up is less than 10 seconds. */ - if( cnt>5 ){ + (*pCnt)++; + if( *pCnt>5 ){ int nDelay = 1; /* Pause time in microseconds */ - if( cnt>100 ){ + int cnt = (*pCnt & ~WAL_RETRY_BLOCKED_MASK); + if( cnt>WAL_RETRY_PROTOCOL_LIMIT ){ VVA_ONLY( pWal->lockError = 1; ) return SQLITE_PROTOCOL; } - if( cnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39; + if( *pCnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + /* In SQLITE_ENABLE_SETLK_TIMEOUT builds, configure the file-descriptor + ** to block for locks for approximately nDelay us. This affects three + ** locks: (a) the shared lock taken on the DMS slot in os_unix.c (if + ** using os_unix.c), (b) the WRITER lock taken in walIndexReadHdr() if the + ** first attempted read fails, and (c) the shared lock taken on the + ** read-mark. + ** + ** If the previous call failed due to an SQLITE_BUSY_TIMEOUT error, + ** then sleep for the minimum of 1us. The previous call already provided + ** an extra delay while it was blocking on the lock. + */ + nBlockTmout = (nDelay+998) / 1000; + if( !useWal && walEnableBlockingMs(pWal, nBlockTmout) ){ + if( *pCnt & WAL_RETRY_BLOCKED_MASK ) nDelay = 1; + } +#endif sqlite3OsSleep(pWal->pVfs, nDelay); + *pCnt &= ~WAL_RETRY_BLOCKED_MASK; } if( !useWal ){ @@ -67182,6 +67553,13 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ if( pWal->bShmUnreliable==0 ){ rc = walIndexReadHdr(pWal, pChanged); } +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + walDisableBlocking(pWal); + if( rc==SQLITE_BUSY_TIMEOUT ){ + rc = SQLITE_BUSY; + *pCnt |= WAL_RETRY_BLOCKED_MASK; + } +#endif if( rc==SQLITE_BUSY ){ /* If there is not a recovery running in another thread or process ** then convert BUSY errors to WAL_RETRY. If recovery is known to @@ -67296,9 +67674,19 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT; } + (void)walEnableBlockingMs(pWal, nBlockTmout); rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); + walDisableBlocking(pWal); if( rc ){ - return rc==SQLITE_BUSY ? WAL_RETRY : rc; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + if( rc==SQLITE_BUSY_TIMEOUT ){ + *pCnt |= WAL_RETRY_BLOCKED_MASK; + } +#else + assert( rc!=SQLITE_BUSY_TIMEOUT ); +#endif + assert( (rc&0xFF)!=SQLITE_BUSY||rc==SQLITE_BUSY||rc==SQLITE_BUSY_TIMEOUT ); + return (rc&0xFF)==SQLITE_BUSY ? 
WAL_RETRY : rc;
  }

  /* Now that the read-lock has been obtained, check that neither the
  ** value in the aReadMark[] array or the contents of the wal-index
@@ -67486,7 +67874,7 @@ static int walBeginReadTransaction(Wal *pWal, int *pChanged){
#endif

  do{
-    rc = walTryBeginRead(pWal, pChanged, 0, ++cnt);
+    rc = walTryBeginRead(pWal, pChanged, 0, &cnt);
  }while( rc==WAL_RETRY );
  testcase( (rc&0xff)==SQLITE_BUSY );
  testcase( (rc&0xff)==SQLITE_IOERR );
@@ -67667,6 +68055,7 @@ static int walFindFrame(
        iRead = iFrame;
      }
      if( (nCollide--)==0 ){
+        *piRead = 0;
        return SQLITE_CORRUPT_BKPT;
      }
      iKey = walNextHash(iKey);
@@ -67970,7 +68359,7 @@ static int walRestartLog(Wal *pWal){
    cnt = 0;
    do{
      int notUsed;
-      rc = walTryBeginRead(pWal, &notUsed, 1, ++cnt);
+      rc = walTryBeginRead(pWal, &notUsed, 1, &cnt);
    }while( rc==WAL_RETRY );
    assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */
    testcase( (rc&0xff)==SQLITE_IOERR );
@@ -68391,10 +68780,9 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
  if( pWal->readOnly ) return SQLITE_READONLY;
  WALTRACE(("WAL%p: checkpoint begins\n", pWal));

-  /* Enable blocking locks, if possible. If blocking locks are successfully
-  ** enabled, set xBusy2=0 so that the busy-handler is never invoked. */
+  /* Enable blocking locks, if possible. */
  sqlite3WalDb(pWal, db);
-  (void)walEnableBlocking(pWal);
+  if( xBusy2 ) (void)walEnableBlocking(pWal);

  /* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive
  ** "checkpoint" lock on the database file.
@@ -68435,9 +68823,14 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
  /* Read the wal-index header. */
  SEH_TRY {
    if( rc==SQLITE_OK ){
+      /* For a passive checkpoint, do not re-enable blocking locks after
+      ** reading the wal-index header. A passive checkpoint should not block
+      ** or invoke the busy handler. The only lock such a checkpoint may
+      ** attempt to obtain is a lock on a read-slot, and it should give up
+      ** immediately and do a partial checkpoint if it cannot obtain it. */
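      /* For example (an illustration, not upstream commentary): a passive
      ** checkpoint requested through the public API exercises exactly this
      ** path:
      **
      **     int nLog = 0, nCkpt = 0;
      **     rc = sqlite3_wal_checkpoint_v2(db, "main",
      **              SQLITE_CHECKPOINT_PASSIVE, &nLog, &nCkpt);
      **
      ** If a read-slot lock cannot be obtained immediately, the checkpoint
      ** simply covers fewer frames; the call itself does not block. */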
      walDisableBlocking(pWal);
      rc = walIndexReadHdr(pWal, &isChanged);
-     (void)walEnableBlocking(pWal);
+     if( eMode2!=SQLITE_CHECKPOINT_PASSIVE ) (void)walEnableBlocking(pWal);
      if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){
        sqlite3OsUnfetch(pWal->pDbFd, 0, 0);
      }
@@ -68774,7 +69167,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){
**     22       1     Min embedded payload fraction (must be 32)
**     23       1     Min leaf payload fraction (must be 32)
**     24       4     File change counter
-**     28       4     Reserved for future use
+**     28       4     The size of the database in pages
**     32       4     First freelist page
**     36       4     Number of freelist pages in the file
**     40      60     15 4-byte meta values passed to higher layers
@@ -74901,7 +75294,6 @@ static int accessPayload(
      assert( aWrite>=pBufStart );                         /* due to (6) */
      memcpy(aSave, aWrite, 4);
      rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1));
-     if( rc && nextPage>pBt->nPage ) rc = SQLITE_CORRUPT_BKPT;
      nextPage = get4byte(aWrite);
      memcpy(aWrite, aSave, 4);
    }else
@@ -76021,7 +76413,10 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur){
  }

  pPage = pCur->pPage;
-  assert( pPage->isInit );
+  if( sqlite3FaultSim(412) ) pPage->isInit = 0;
+  if( !pPage->isInit ){
+    return SQLITE_CORRUPT_BKPT;
+  }
  if( !pPage->leaf ){
    int idx = pCur->ix;
    rc = moveToChild(pCur, get4byte(findCell(pPage, idx)));
@@ -83412,7 +83807,7 @@ static int valueFromFunction(
#endif
  assert( pFunc );
  if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0
   || (pFunc->funcFlags & (SQLITE_FUNC_NEEDCOLL|SQLITE_FUNC_RUNONLY))!=0
  ){
    return SQLITE_OK;
  }
@@ -84136,10 +84531,11 @@ static int growOpArray(Vdbe *v, int nOp){
**   sqlite3CantopenError(lineno)
*/
static void test_addop_breakpoint(int pc, Op *pOp){
-  static int n = 0;
+  static u64 n = 0;
  (void)pc;
  (void)pOp;
  n++;
+  if( n==LARGEST_UINT64 ) abort(); /* so that n is used, preventing a warning */
}
#endif
@@ -85324,6 +85720,10 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){
      if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4);
      break;
    }
+    case P4_TABLEREF: {
+      if( db->pnBytesFreed==0 ) sqlite3DeleteTable(db, (Table*)p4);
+      break;
+    }
  }
}
@@ -85451,7 +85851,7 @@ static void SQLITE_NOINLINE vdbeChangeP4Full(
  int n
){
  if( pOp->p4type ){
-    freeP4(p->db, pOp->p4type, pOp->p4.p);
+    assert( pOp->p4type > P4_FREE_IF_LE );
    pOp->p4type = 0;
    pOp->p4.p = 0;
  }
@@ -89574,7 +89974,15 @@ SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){
  int rc = SQLITE_OK;
  Vdbe *p = (Vdbe*)pStmt;
#if SQLITE_THREADSAFE
-  sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex;
+  sqlite3_mutex *mutex;
+#endif
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( pStmt==0 ){
+    return SQLITE_MISUSE_BKPT;
+  }
+#endif
+#if SQLITE_THREADSAFE
+  mutex = p->db->mutex;
#endif
  sqlite3_mutex_enter(mutex);
  for(i=0; i<p->nVar; i++){
@@ -89953,6 +90361,18 @@ SQLITE_API void sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubt
#ifdef SQLITE_ENABLE_API_ARMOR
  if( pCtx==0 ) return;
#endif
+#if defined(SQLITE_STRICT_SUBTYPE) && SQLITE_STRICT_SUBTYPE+0!=0
+  if( pCtx->pFunc!=0
+   && (pCtx->pFunc->funcFlags & SQLITE_RESULT_SUBTYPE)==0
+  ){
+    char zErr[200];
+    sqlite3_snprintf(sizeof(zErr), zErr,
+                     "misuse of sqlite3_result_subtype() by %s()",
+                     pCtx->pFunc->zName);
+    sqlite3_result_error(pCtx, zErr, -1);
+    return;
+  }
+#endif /* SQLITE_STRICT_SUBTYPE */
  pOut = pCtx->pOut;
  assert( sqlite3_mutex_held(pOut->db->mutex) );
  pOut->eSubtype = eSubtype & 0xff;
@@ -90352,9 +90772,8 @@ SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){
SQLITE_API void *sqlite3_user_data(sqlite3_context *p){
#ifdef SQLITE_ENABLE_API_ARMOR
  if( p==0 ) return 0;
-#else
-  assert( p && p->pFunc );
#endif
+  assert( p && p->pFunc );
  return p->pFunc->pUserData;
}
@@ -92271,11 +92690,12 @@ SQLITE_API int sqlite3_found_count = 0;
**   sqlite3CantopenError(lineno)
*/
static void test_trace_breakpoint(int pc, Op *pOp, Vdbe *v){
-  static int n = 0;
+  static u64 n = 0;
  (void)pc;
  (void)pOp;
  (void)v;
  n++;
+  if( n==LARGEST_UINT64 ) abort(); /* So that n is used, preventing a warning */
}
#endif
@@ -94172,7 +94592,7 @@ case OP_AddImm: {            /* in1 */
  pIn1 = &aMem[pOp->p1];
  memAboutToChange(p, pIn1);
  sqlite3VdbeMemIntegerify(pIn1);
-  pIn1->u.i += pOp->p2;
+  *(u64*)&pIn1->u.i += (u64)pOp->p2;
  break;
}
@@ -100318,24 +100738,23 @@ case OP_VCheck: {             /* out2 */

  pOut = &aMem[pOp->p2];
  sqlite3VdbeMemSetNull(pOut);  /* Innocent until proven guilty */
-  assert( pOp->p4type==P4_TABLE );
+  assert( pOp->p4type==P4_TABLEREF );
  pTab = pOp->p4.pTab;
  assert( pTab!=0 );
+  assert( pTab->nTabRef>0 );
  assert( IsVirtual(pTab) );
-  assert( pTab->u.vtab.p!=0 );
+  if( pTab->u.vtab.p==0 ) break;
  pVtab = pTab->u.vtab.p->pVtab;
  assert( pVtab!=0 );
  pModule = pVtab->pModule;
  assert( pModule!=0 );
  assert( pModule->iVersion>=4 );
  assert( pModule->xIntegrity!=0 );
-  pTab->nTabRef++;
  sqlite3VtabLock(pTab->u.vtab.p);
  assert( pOp->p1>=0 && pOp->p1<db->nDb );
  rc = pModule->xIntegrity(pVtab, db->aDb[pOp->p1].zDbSName,
             pTab->zName, pOp->p3, &zErr);
  sqlite3VtabUnlock(pTab->u.vtab.p);
-  sqlite3DeleteTable(db, pTab);
  if( rc ){
    sqlite3_free(zErr);
    goto abort_due_to_error;
@@ -100460,6 +100879,7 @@ case OP_VColumn: {           /* ncycle */
  const sqlite3_module *pModule;
  Mem *pDest;
  sqlite3_context sContext;
+  FuncDef nullFunc;

  VdbeCursor *pCur = p->apCsr[pOp->p1];
  assert( pCur!=0 );
@@ -100477,6 +100897,9 @@ case OP_VColumn: {           /* ncycle */
  memset(&sContext, 0, sizeof(sContext));
  sContext.pOut = pDest;
  sContext.enc = encoding;
+  nullFunc.pUserData = 0;
+  nullFunc.funcFlags = SQLITE_RESULT_SUBTYPE;
+  sContext.pFunc = &nullFunc;
  assert( pOp->p5==OPFLAG_NOCHNG || pOp->p5==0 );
  if( pOp->p5 & OPFLAG_NOCHNG ){
    sqlite3VdbeMemSetNull(pDest);
@@ -100809,6 +101232,42 @@ case OP_ClrSubtype: {   /* in1 */
  break;
}

+/* Opcode: GetSubtype P1 P2 * * *
+** Synopsis: r[P2] = r[P1].subtype
+**
+** Extract the subtype value from register P1 and write that subtype
+** into register P2. If P1 has no subtype, then P2 gets a NULL.
+*/
+case OP_GetSubtype: {   /* in1 out2 */
+  pIn1 = &aMem[pOp->p1];
+  pOut = &aMem[pOp->p2];
+  if( pIn1->flags & MEM_Subtype ){
+    sqlite3VdbeMemSetInt64(pOut, pIn1->eSubtype);
+  }else{
+    sqlite3VdbeMemSetNull(pOut);
+  }
+  break;
+}
+
+/* Opcode: SetSubtype P1 P2 * * *
+** Synopsis: r[P2].subtype = r[P1]
+**
+** Set the subtype value of register P2 to the integer from register P1.
+** If P1 is NULL, clear the subtype from P2.
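**
** (These two opcodes exist so that subtype values survive the trip through
** the sorter used by ORDER BY aggregates; see the AggInfo.bUseSubtype flag
** added earlier in this patch. One query that plausibly exercises them,
** assuming the JSON aggregate path, is:
**
**      SELECT json_group_array(json(x) ORDER BY y) FROM t;
**
** GetSubtype copies each argument's subtype into the sorter record and
** SetSubtype restores it when the rows are read back out.)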
+*/
+case OP_SetSubtype: {   /* in1 out2 */
+  pIn1 = &aMem[pOp->p1];
+  pOut = &aMem[pOp->p2];
+  if( pIn1->flags & MEM_Null ){
+    pOut->flags &= ~MEM_Subtype;
+  }else{
+    assert( pIn1->flags & MEM_Int );
+    pOut->flags |= MEM_Subtype;
+    pOut->eSubtype = (u8)(pIn1->u.i & 0xff);
+  }
+  break;
+}
+
/* Opcode: FilterAdd P1 * P3 P4 *
** Synopsis: filter(P1) += key(P3@P4)
**
@@ -105857,6 +106316,7 @@ SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr *pExpr){
  assert( ExprUseYTab(pExpr) );
  pExTab = pExpr->y.pTab;
  assert( pExTab!=0 );
+  assert( n < pExTab->nCol );
  if( (pExTab->tabFlags & TF_HasGenerated)!=0
   && (pExTab->aCol[n].colFlags & COLFLAG_GENERATED)!=0
  ){
@@ -106433,6 +106893,7 @@ static int lookupName(
    sqlite3RecordErrorOffsetOfExpr(pParse->db, pExpr);
    pParse->checkSchema = 1;
    pTopNC->nNcErr++;
+    eNewExprOp = TK_NULL;
  }
  assert( pFJMatch==0 );
@@ -106459,7 +106920,7 @@ static int lookupName(
  ** If a generated column is referenced, set bits for every column
  ** of the table.
  */
-  if( pExpr->iColumn>=0 && pMatch!=0 ){
+  if( pExpr->iColumn>=0 && cnt==1 && pMatch!=0 ){
    pMatch->colUsed |= sqlite3ExprColUsed(pExpr);
  }
@@ -106924,11 +107385,12 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
        while( pNC2
            && sqlite3ReferencesSrcList(pParse, pExpr, pNC2->pSrcList)==0
        ){
-          pExpr->op2++;
+          pExpr->op2 += (1 + pNC2->nNestedSelect);
          pNC2 = pNC2->pNext;
        }
        assert( pDef!=0 || IN_RENAME_OBJECT );
        if( pNC2 && pDef ){
+          pExpr->op2 += pNC2->nNestedSelect;
          assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg );
          assert( SQLITE_FUNC_ANYORDER==NC_OrderAgg );
          testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 );
@@ -107487,6 +107949,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){

    /* Recursively resolve names in all subqueries in the FROM clause
    */
+    if( pOuterNC ) pOuterNC->nNestedSelect++;
    for(i=0; i<p->pSrc->nSrc; i++){
      SrcItem *pItem = &p->pSrc->a[i];
      if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){
@@ -107511,6 +107974,9 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
        }
      }
    }
+    if( pOuterNC && ALWAYS(pOuterNC->nNestedSelect>0) ){
+      pOuterNC->nNestedSelect--;
+    }

    /* Set up the local name-context to pass to sqlite3ResolveExprNames() to
    ** resolve the result-set expression list.
@@ -109098,9 +109564,7 @@ SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy(
  assert( ExprUseXList(pExpr) );
  if( pExpr->x.pList==0 || NEVER(pExpr->x.pList->nExpr==0) ){
    /* Ignore ORDER BY on zero-argument aggregates */
-    sqlite3ParserAddCleanup(pParse,
-        (void(*)(sqlite3*,void*))sqlite3ExprListDelete,
-        pOrderBy);
+    sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pOrderBy);
    return;
  }
  if( IsWindowFunc(pExpr) ){
@@ -109281,6 +109745,9 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){
SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){
  if( p ) sqlite3ExprDeleteNN(db, p);
}
+SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3 *db, void *p){
+  if( ALWAYS(p) ) sqlite3ExprDeleteNN(db, (Expr*)p);
+}

/*
** Clear both elements of an OnOrUsing object
@@ -109306,9 +109773,7 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){
** pExpr to the pParse->pConstExpr list with a register number of 0.
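**
** (The various *Generic() wrappers introduced by this patch -- for example
** sqlite3ExprDeleteGeneric() just above, and the ExprList/Table/Select/With
** equivalents elsewhere -- all follow one pattern: cast the object pointer
** inside a (sqlite3*,void*) function instead of casting the function
** pointer itself, since calling through an incompatible function-pointer
** type is undefined behavior in C and defeats control-flow-integrity
** checks. Sketched generically, with invented names:
**
**      static void xDeleteGeneric(sqlite3 *db, void *p){
**        xDelete(db, (ObjectType*)p);
**      }
**
** which is then safe to hand to sqlite3ParserAddCleanup().)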
*/
SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){
-  sqlite3ParserAddCleanup(pParse,
-      (void(*)(sqlite3*,void*))sqlite3ExprDelete,
-      pExpr);
+  sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr);
}

/* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the
@@ -110114,6 +110579,9 @@ static SQLITE_NOINLINE void exprListDeleteNN(sqlite3 *db, ExprList *pList){
SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){
  if( pList ) exprListDeleteNN(db, pList);
}
+SQLITE_PRIVATE void sqlite3ExprListDeleteGeneric(sqlite3 *db, void *pList){
+  if( ALWAYS(pList) ) exprListDeleteNN(db, (ExprList*)pList);
+}

/*
** Return the bitwise-OR of all Expr.flags fields in the given
@@ -110613,9 +111081,10 @@ SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){
    case TK_COLUMN:
      assert( ExprUseYTab(p) );
      return ExprHasProperty(p, EP_CanBeNull) ||
-             p->y.pTab==0 ||  /* Reference to column of index on expression */
+             NEVER(p->y.pTab==0) ||  /* Reference to column of index on expr */
             (p->iColumn>=0
              && p->y.pTab->aCol!=0 /* Possible due to prior error */
+              && ALWAYS(p->iColumn<p->y.pTab->nCol)
              && p->y.pTab->aCol[p->iColumn].notNull==0);
    default:
      return 1;
@@ -113197,8 +113666,10 @@ SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){
  inReg = sqlite3ExprCodeTarget(pParse, pExpr, target);
  if( inReg!=target ){
    u8 op;
-    if( ALWAYS(pExpr)
-     && (ExprHasProperty(pExpr,EP_Subquery) || pExpr->op==TK_REGISTER)
+    Expr *pX = sqlite3ExprSkipCollateAndLikely(pExpr);
+    testcase( pX!=pExpr );
+    if( ALWAYS(pX)
+     && (ExprHasProperty(pX,EP_Subquery) || pX->op==TK_REGISTER)
    ){
      op = OP_Copy;
    }else{
@@ -113918,8 +114389,8 @@ SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList *pA, const ExprList *pB
*/
SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr *pA,Expr *pB, int iTab){
  return sqlite3ExprCompare(0,
-             sqlite3ExprSkipCollateAndLikely(pA),
-             sqlite3ExprSkipCollateAndLikely(pB),
+             sqlite3ExprSkipCollate(pA),
+             sqlite3ExprSkipCollate(pB),
             iTab);
}
@@ -114644,13 +115115,14 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
    case TK_AGG_FUNCTION: {
      if( (pNC->ncFlags & NC_InAggFunc)==0
       && pWalker->walkerDepth==pExpr->op2
+       && pExpr->pAggInfo==0
      ){
        /* Check to see if pExpr is a duplicate of another aggregate
        ** function that is already in the pAggInfo structure
        */
        struct AggInfo_func *pItem = pAggInfo->aFunc;
        for(i=0; i<pAggInfo->nFunc; i++, pItem++){
-          if( pItem->pFExpr==pExpr ) break;
+          if( NEVER(pItem->pFExpr==pExpr) ) break;
          if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){
            break;
          }
@@ -114693,6 +115165,8 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
          }else{
            pItem->bOBPayload = 1;
          }
+          pItem->bUseSubtype =
+              (pItem->pFunc->funcFlags & SQLITE_SUBTYPE)!=0;
        }else{
          pItem->iOBTab = -1;
        }
@@ -117459,9 +117933,9 @@ static void openStatTable(
typedef struct StatAccum StatAccum;
typedef struct StatSample StatSample;
struct StatSample {
-  tRowcnt *anEq;                  /* sqlite_stat4.nEq */
  tRowcnt *anDLt;                 /* sqlite_stat4.nDLt */
#ifdef SQLITE_ENABLE_STAT4
+  tRowcnt *anEq;                  /* sqlite_stat4.nEq */
  tRowcnt *anLt;                  /* sqlite_stat4.nLt */
  union {
    i64 iRowid;                     /* Rowid in main table of the key */
@@ -117619,9 +118093,9 @@ static void statInit(

  /* Allocate the space required for the StatAccum object */
  n = sizeof(*p)
-    + sizeof(tRowcnt)*nColUp   /* StatAccum.anEq */
-    + sizeof(tRowcnt)*nColUp;  /* StatAccum.anDLt */
+    + sizeof(tRowcnt)*nColUp;  /* StatAccum.anDLt */
#ifdef SQLITE_ENABLE_STAT4
+  n += sizeof(tRowcnt)*nColUp;  /* StatAccum.anEq */
  if( mxSample ){
    n += sizeof(tRowcnt)*nColUp                  /* StatAccum.anLt */
       + sizeof(StatSample)*(nCol+mxSample)      /* StatAccum.aBest[], a[] */
@@ -117642,9 +118116,9 @@ static void statInit(
  p->nKeyCol = nKeyCol;
  p->nSkipAhead = 0;
  p->current.anDLt = (tRowcnt*)&p[1];
-  p->current.anEq = &p->current.anDLt[nColUp];

#ifdef SQLITE_ENABLE_STAT4
+  p->current.anEq = &p->current.anDLt[nColUp];
  p->mxSample = p->nLimit==0 ? mxSample : 0;
  if( mxSample ){
    u8 *pSpace;                     /* Allocated space not yet assigned */
@@ -117911,7 +118385,9 @@ static void statPush(

  if( p->nRow==0 ){
    /* This is the first call to this function. Do initialization. */
+#ifdef SQLITE_ENABLE_STAT4
    for(i=0; i<p->nCol; i++) p->current.anEq[i] = 1;
+#endif
  }else{
    /* Second and subsequent calls get processed here */
#ifdef SQLITE_ENABLE_STAT4
@@ -117920,15 +118396,17 @@ static void statPush(
    /* Update anDLt[], anLt[] and anEq[] to reflect the values that apply
    ** to the current row of the index. */
+#ifdef SQLITE_ENABLE_STAT4
    for(i=0; i<iChng; i++){
      p->current.anEq[i]++;
    }
+#endif
    for(i=iChng; i<p->nCol; i++){
      p->current.anDLt[i]++;
#ifdef SQLITE_ENABLE_STAT4
      if( p->mxSample ) p->current.anLt[i] += p->current.anEq[i];
-#endif
      p->current.anEq[i] = 1;
+#endif
    }
  }
@@ -118062,7 +118540,9 @@ static void statGet(
        u64 iVal = (p->nRow + nDistinct - 1) / nDistinct;
        if( iVal==2 && p->nRow*10 <= nDistinct*11 ) iVal = 1;
        sqlite3_str_appendf(&sStat, " %llu", iVal);
+#ifdef SQLITE_ENABLE_STAT4
        assert( p->current.anEq[i] );
+#endif
      }
      sqlite3ResultStrAccum(context, &sStat);
    }
@@ -118751,6 +119231,16 @@ static void decodeIntArray(
      while( z[0]!=0 && z[0]!=' ' ) z++;
      while( z[0]==' ' ) z++;
    }
+
+    /* Set the bLowQual flag if the peak number of rows obtained
+    ** from a full equality match is so large that a full table scan
+    ** seems likely to be faster than using the index.
+    */
+    if( aLog[0] > 66              /* Index has more than 100 rows */
+     && aLog[0] <= aLog[nOut-1]   /* And only a single value seen */
+    ){
+      pIndex->bLowQual = 1;
+    }
  }
}
@@ -120797,7 +121287,7 @@ SQLITE_PRIVATE void sqlite3ColumnSetExpr(
*/
SQLITE_PRIVATE Expr *sqlite3ColumnExpr(Table *pTab, Column *pCol){
  if( pCol->iDflt==0 ) return 0;
-  if( NEVER(!IsOrdinaryTable(pTab)) ) return 0;
+  if( !IsOrdinaryTable(pTab) ) return 0;
  if( NEVER(pTab->u.tab.pDfltList==0) ) return 0;
  if( NEVER(pTab->u.tab.pDfltList->nExpr<pCol->iDflt) ) return 0;
  return pTab->u.tab.pDfltList->a[pCol->iDflt-1].pExpr;
@@ -120950,6 +121440,9 @@ SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){
  if( db->pnBytesFreed==0 && (--pTable->nTabRef)>0 ) return;
  deleteTable(db, pTable);
}
+SQLITE_PRIVATE void sqlite3DeleteTableGeneric(sqlite3 *db, void *pTable){
+  sqlite3DeleteTable(db, (Table*)pTable);
+}

/*
@@ -121487,7 +121980,8 @@ SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table *pTab, Column *pCol){
/*
** Clean up the data structures associated with the RETURNING clause.
*/ -static void sqlite3DeleteReturning(sqlite3 *db, Returning *pRet){ +static void sqlite3DeleteReturning(sqlite3 *db, void *pArg){ + Returning *pRet = (Returning*)pArg; Hash *pHash; pHash = &(db->aDb[1].pSchema->trigHash); sqlite3HashInsert(pHash, pRet->zName, 0); @@ -121529,8 +122023,7 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){ pParse->u1.pReturning = pRet; pRet->pParse = pParse; pRet->pReturnEL = pList; - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3DeleteReturning, pRet); + sqlite3ParserAddCleanup(pParse, sqlite3DeleteReturning, pRet); testcase( pParse->earlyCleanup ); if( db->mallocFailed ) return; sqlite3_snprintf(sizeof(pRet->zName), pRet->zName, @@ -121729,7 +122222,8 @@ SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, Column *pCol){ assert( zIn!=0 ); while( zIn[0] ){ - h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff]; + u8 x = *(u8*)zIn; + h = (h<<8) + sqlite3UpperToLower[x]; zIn++; if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */ aff = SQLITE_AFF_TEXT; @@ -125594,7 +126088,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){ if( iDb<0 ) return; z = sqlite3NameFromToken(db, pObjName); if( z==0 ) return; - zDb = db->aDb[iDb].zDbSName; + zDb = pName2->n ? db->aDb[iDb].zDbSName : 0; pTab = sqlite3FindTable(db, z, zDb); if( pTab ){ reindexTable(pParse, pTab, 0); @@ -125604,6 +126098,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){ pIndex = sqlite3FindIndex(db, z, zDb); sqlite3DbFree(db, z); if( pIndex ){ + iDb = sqlite3SchemaToIndex(db, pIndex->pTable->pSchema); sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3RefillIndex(pParse, pIndex, -1); return; @@ -125769,6 +126264,9 @@ SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){ sqlite3DbFree(db, pWith); } } +SQLITE_PRIVATE void sqlite3WithDeleteGeneric(sqlite3 *db, void *pWith){ + sqlite3WithDelete(db, (With*)pWith); +} #endif /* !defined(SQLITE_OMIT_CTE) */ /************** End of build.c ***********************************************/ @@ -138994,7 +139492,8 @@ SQLITE_PRIVATE void sqlite3Pragma( if( pVTab->pModule->iVersion<4 ) continue; if( pVTab->pModule->xIntegrity==0 ) continue; sqlite3VdbeAddOp3(v, OP_VCheck, i, 3, isQuick); - sqlite3VdbeAppendP4(v, pTab, P4_TABLE); + pTab->nTabRef++; + sqlite3VdbeAppendP4(v, pTab, P4_TABLEREF); a1 = sqlite3VdbeAddOp1(v, OP_IsNull, 3); VdbeCoverage(v); integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, a1); @@ -141021,6 +141520,7 @@ static int sqlite3LockAndPrepare( assert( (rc&db->errMask)==rc ); db->busyHandler.nBusy = 0; sqlite3_mutex_leave(db->mutex); + assert( rc==SQLITE_OK || (*ppStmt)==0 ); return rc; } @@ -141418,6 +141918,9 @@ SQLITE_PRIVATE Select *sqlite3SelectNew( SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){ if( OK_IF_ALWAYS_TRUE(p) ) clearSelect(db, p, 1); } +SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3 *db, void *p){ + if( ALWAYS(p) ) clearSelect(db, (Select*)p, 1); +} /* ** Return a pointer to the right-most SELECT statement in a compound. 
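**
** (A note on the sqlite3Reindex() change above, with an illustrative SQL
** session; the index name is hypothetical. Before the fix, an unqualified
** REINDEX resolved the name against a single schema even though no
** database name was given. With pName2->n checked, zDb is left NULL for
** the unqualified form, so sqlite3FindTable()/sqlite3FindIndex() search
** every attached database:
**
**      REINDEX main.idx1;   -- explicit schema: only "main" is searched
**      REINDEX idx1;        -- unqualified: all attached schemas searched
**
** The follow-up sqlite3SchemaToIndex() call then recomputes iDb from the
** index actually found, so the write operation begins on the correct
** schema.)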
@@ -143553,7 +144056,8 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( NameContext sNC; assert( pSelect!=0 ); - assert( (pSelect->selFlags & SF_Resolved)!=0 ); + testcase( (pSelect->selFlags & SF_Resolved)==0 ); + assert( (pSelect->selFlags & SF_Resolved)!=0 || IN_RENAME_OBJECT ); assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 ); assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB ); if( db->mallocFailed || IN_RENAME_OBJECT ) return; @@ -144437,9 +144941,7 @@ static int multiSelect( pDest->iSdst = dest.iSdst; pDest->nSdst = dest.nSdst; if( pDelete ){ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3SelectDelete, - pDelete); + sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pDelete); } return rc; } @@ -144990,8 +145492,7 @@ static int multiSelectOrderBy( /* Make arrangements to free the 2nd and subsequent arms of the compound ** after the parse has finished */ if( pSplit->pPrior ){ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3SelectDelete, pSplit->pPrior); + sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pSplit->pPrior); } pSplit->pPrior = pPrior; pPrior->pNext = pSplit; @@ -145812,9 +146313,7 @@ static int flattenSubquery( Table *pTabToDel = pSubitem->pTab; if( pTabToDel->nTabRef==1 ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); - sqlite3ParserAddCleanup(pToplevel, - (void(*)(sqlite3*,void*))sqlite3DeleteTable, - pTabToDel); + sqlite3ParserAddCleanup(pToplevel, sqlite3DeleteTableGeneric, pTabToDel); testcase( pToplevel->earlyCleanup ); }else{ pTabToDel->nTabRef--; @@ -146861,8 +147360,7 @@ static struct Cte *searchWith( SQLITE_PRIVATE With *sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){ if( pWith ){ if( bFree ){ - pWith = (With*)sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3WithDelete, + pWith = (With*)sqlite3ParserAddCleanup(pParse, sqlite3WithDeleteGeneric, pWith); if( pWith==0 ) return 0; } @@ -147606,10 +148104,11 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ SrcList *pTabList; SrcItem *pFrom; - assert( p->selFlags & SF_Resolved ); if( p->selFlags & SF_HasTypeInfo ) return; p->selFlags |= SF_HasTypeInfo; pParse = pWalker->pParse; + testcase( (p->selFlags & SF_Resolved)==0 ); + assert( (p->selFlags & SF_Resolved) || IN_RENAME_OBJECT ); pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ Table *pTab = pFrom->pTab; @@ -147894,6 +148393,7 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ assert( pFunc->pFExpr->pLeft!=0 ); assert( pFunc->pFExpr->pLeft->op==TK_ORDER ); assert( ExprUseXList(pFunc->pFExpr->pLeft) ); + assert( pFunc->pFunc!=0 ); pOBList = pFunc->pFExpr->pLeft->x.pList; if( !pFunc->bOBUnique ){ nExtra++; /* One extra column for the OP_Sequence */ @@ -147903,6 +148403,9 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ assert( ExprUseXList(pFunc->pFExpr) ); nExtra += pFunc->pFExpr->x.pList->nExpr; } + if( pFunc->bUseSubtype ){ + nExtra += pFunc->pFExpr->x.pList->nExpr; + } pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pOBList, 0, nExtra); if( !pFunc->bOBUnique && pParse->nErr==0 ){ pKeyInfo->nKeyField++; @@ -147929,9 +148432,9 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ assert( ExprUseXList(pF->pFExpr) ); pList = pF->pFExpr->x.pList; if( pF->iOBTab>=0 ){ - /* For an ORDER BY aggregate, calls to OP_AggStep where deferred and - ** all content was stored in emphermal table pF->iOBTab. 
Extract that - ** content now (in ORDER BY order) and make all calls to OP_AggStep + /* For an ORDER BY aggregate, calls to OP_AggStep were deferred. Inputs + ** were stored in emphermal table pF->iOBTab. Here, we extract those + ** inputs (in ORDER BY order) and make all calls to OP_AggStep ** before doing the OP_AggFinal call. */ int iTop; /* Start of loop for extracting columns */ int nArg; /* Number of columns to extract */ @@ -147939,6 +148442,7 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ int regAgg; /* Extract into this array */ int j; /* Loop counter */ + assert( pF->pFunc!=0 ); nArg = pList->nExpr; regAgg = sqlite3GetTempRange(pParse, nArg); @@ -147955,6 +148459,15 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ for(j=nArg-1; j>=0; j--){ sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, nKey+j, regAgg+j); } + if( pF->bUseSubtype ){ + int regSubtype = sqlite3GetTempReg(pParse); + int iBaseCol = nKey + nArg + (pF->bOBPayload==0 && pF->bOBUnique==0); + for(j=nArg-1; j>=0; j--){ + sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, iBaseCol+j, regSubtype); + sqlite3VdbeAddOp2(v, OP_SetSubtype, regSubtype, regAgg+j); + } + sqlite3ReleaseTempReg(pParse, regSubtype); + } sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); sqlite3VdbeChangeP5(v, (u8)nArg); @@ -148009,6 +148522,7 @@ static void updateAccumulator( ExprList *pList; assert( ExprUseXList(pF->pFExpr) ); assert( !IsWindowFunc(pF->pFExpr) ); + assert( pF->pFunc!=0 ); pList = pF->pFExpr->x.pList; if( ExprHasProperty(pF->pFExpr, EP_WinFunc) ){ Expr *pFilter = pF->pFExpr->y.pWin->pFilter; @@ -148053,6 +148567,9 @@ static void updateAccumulator( if( pF->bOBPayload ){ regAggSz += nArg; } + if( pF->bUseSubtype ){ + regAggSz += nArg; + } regAggSz++; /* One extra register to hold result of MakeRecord */ regAgg = sqlite3GetTempRange(pParse, regAggSz); regDistinct = regAgg; @@ -148065,6 +148582,14 @@ static void updateAccumulator( if( pF->bOBPayload ){ regDistinct = regAgg+jj; sqlite3ExprCodeExprList(pParse, pList, regDistinct, 0, SQLITE_ECEL_DUP); + jj += nArg; + } + if( pF->bUseSubtype ){ + int kk; + int regBase = pF->bOBPayload ? regDistinct : regAgg; + for(kk=0; kknExpr; @@ -148269,7 +148794,8 @@ static SrcItem *isSelfJoinView( /* ** Deallocate a single AggInfo object */ -static void agginfoFree(sqlite3 *db, AggInfo *p){ +static void agginfoFree(sqlite3 *db, void *pArg){ + AggInfo *p = (AggInfo*)pArg; sqlite3DbFree(db, p->aCol); sqlite3DbFree(db, p->aFunc); sqlite3DbFreeNN(db, p); @@ -148343,7 +148869,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ pSub->selFlags |= SF_Aggregate; pSub->selFlags &= ~SF_Compound; pSub->nSelectRow = 0; - sqlite3ExprListDelete(db, pSub->pEList); + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pSub->pEList); pTerm = pPrior ? 
sqlite3ExprDup(db, pCount, 0) : pCount; pSub->pEList = sqlite3ExprListAppend(pParse, 0, pTerm); pTerm = sqlite3PExpr(pParse, TK_SELECT, 0, 0); @@ -148523,9 +149049,8 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY"); } #endif - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3ExprListDelete, - p->pOrderBy); + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, + p->pOrderBy); testcase( pParse->earlyCleanup ); p->pOrderBy = 0; } @@ -148631,6 +149156,7 @@ SQLITE_PRIVATE int sqlite3Select( TREETRACE(0x1000,pParse,p, ("LEFT-JOIN simplifies to JOIN on term %d\n",i)); pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER); + unsetJoinExpr(p->pWhere, pItem->iCursor, 0); } } if( pItem->fg.jointype & JT_LTORJ ){ @@ -148645,17 +149171,15 @@ SQLITE_PRIVATE int sqlite3Select( TREETRACE(0x1000,pParse,p, ("RIGHT-JOIN simplifies to JOIN on term %d\n",j)); pI2->fg.jointype &= ~(JT_RIGHT|JT_OUTER); + unsetJoinExpr(p->pWhere, pI2->iCursor, 1); } } } - for(j=pTabList->nSrc-1; j>=i; j--){ + for(j=pTabList->nSrc-1; j>=0; j--){ pTabList->a[j].fg.jointype &= ~JT_LTORJ; if( pTabList->a[j].fg.jointype & JT_RIGHT ) break; } } - assert( pItem->iCursor>=0 ); - unsetJoinExpr(p->pWhere, pItem->iCursor, - pTabList->a[0].fg.jointype & JT_LTORJ); } /* No further action if this term of the FROM clause is not a subquery */ @@ -148718,9 +149242,8 @@ SQLITE_PRIVATE int sqlite3Select( ){ TREETRACE(0x800,pParse,p, ("omit superfluous ORDER BY on %r FROM-clause subquery\n",i+1)); - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3ExprListDelete, - pSub->pOrderBy); + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, + pSub->pOrderBy); pSub->pOrderBy = 0; } @@ -149249,8 +149772,7 @@ SQLITE_PRIVATE int sqlite3Select( */ pAggInfo = sqlite3DbMallocZero(db, sizeof(*pAggInfo) ); if( pAggInfo ){ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))agginfoFree, pAggInfo); + sqlite3ParserAddCleanup(pParse, agginfoFree, pAggInfo); testcase( pParse->earlyCleanup ); } if( db->mallocFailed ){ @@ -153899,7 +154421,6 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){ if( p ){ db->pDisconnect = 0; - sqlite3ExpirePreparedStatements(db, 0); do { VTable *pNext = p->pNext; sqlite3VtabUnlock(p); @@ -155465,7 +155986,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet*,int); #ifdef WHERETRACE_ENABLED SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC); SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm); -SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC); +SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC); #endif SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm( WhereClause *pWC, /* The WHERE clause to be searched */ @@ -160927,12 +161448,22 @@ static void translateColumnToCopy( for(; iStartp1!=iTabCur ) continue; if( pOp->opcode==OP_Column ){ +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("TRANSLATE OP_Column to OP_Copy at %d\n", iStart); + } +#endif pOp->opcode = OP_Copy; pOp->p1 = pOp->p2 + iRegister; pOp->p2 = pOp->p3; pOp->p3 = 0; pOp->p5 = 2; /* Cause the MEM_Subtype flag to be cleared */ }else if( pOp->opcode==OP_Rowid ){ +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("TRANSLATE OP_Rowid to OP_Sequence at %d\n", iStart); + } +#endif pOp->opcode = OP_Sequence; pOp->p1 = iAutoidxCur; #ifdef SQLITE_ALLOW_ROWID_IN_VIEW @@ -162259,7 +162790,8 @@ static int whereRangeScanEst( ** sample, then 
assume they are 4x more selective. This brings ** the estimated selectivity more in line with what it would be ** if estimated without the use of STAT4 tables. */ - if( iLwrIdx==iUprIdx ) nNew -= 20; assert( 20==sqlite3LogEst(4) ); + if( iLwrIdx==iUprIdx ){ nNew -= 20; } + assert( 20==sqlite3LogEst(4) ); }else{ nNew = 10; assert( 10==sqlite3LogEst(2) ); } @@ -162483,17 +163015,34 @@ SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){ #ifdef WHERETRACE_ENABLED /* ** Print a WhereLoop object for debugging purposes -*/ -SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){ - WhereInfo *pWInfo = pWC->pWInfo; - int nb = 1+(pWInfo->pTabList->nSrc+3)/4; - SrcItem *pItem = pWInfo->pTabList->a + p->iTab; - Table *pTab = pItem->pTab; - Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; - sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, - p->iTab, nb, p->maskSelf, nb, p->prereq & mAll); - sqlite3DebugPrintf(" %12s", - pItem->zAlias ? pItem->zAlias : pTab->zName); +** +** Format example: +** +** .--- Position in WHERE clause rSetup, rRun, nOut ---. +** | | +** | .--- selfMask nTerm ------. | +** | | | | +** | | .-- prereq Idx wsFlags----. | | +** | | | Name | | | +** | | | __|__ nEq ---. ___|__ | __|__ +** | / \ / \ / \ | / \ / \ / \ +** 1.002.001 t2.t2xy 2 f 010241 N 2 cost 0,56,31 +*/ +SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC){ + if( pWC ){ + WhereInfo *pWInfo = pWC->pWInfo; + int nb = 1+(pWInfo->pTabList->nSrc+3)/4; + SrcItem *pItem = pWInfo->pTabList->a + p->iTab; + Table *pTab = pItem->pTab; + Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; + sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, + p->iTab, nb, p->maskSelf, nb, p->prereq & mAll); + sqlite3DebugPrintf(" %12s", + pItem->zAlias ? pItem->zAlias : pTab->zName); + }else{ + sqlite3DebugPrintf("%c%2d.%03llx.%03llx %c%d", + p->cId, p->iTab, p->maskSelf, p->prereq & 0xfff, p->cId, p->iTab); + } if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){ const char *zName; if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){ @@ -162530,6 +163079,15 @@ SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){ } } } +SQLITE_PRIVATE void sqlite3ShowWhereLoop(const WhereLoop *p){ + if( p ) sqlite3WhereLoopPrint(p, 0); +} +SQLITE_PRIVATE void sqlite3ShowWhereLoopList(const WhereLoop *p){ + while( p ){ + sqlite3ShowWhereLoop(p); + p = p->pNextLoop; + } +} #endif /* @@ -162642,46 +163200,60 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ } /* -** Return TRUE if all of the following are true: +** Return TRUE if X is a proper subset of Y but is of equal or less cost. +** In other words, return true if all constraints of X are also part of Y +** and Y has additional constraints that might speed the search that X lacks +** but the cost of running X is not more than the cost of running Y. +** +** In other words, return true if the cost relationship between X and Y +** is inverted and needs to be adjusted. +** +** Case 1: ** -** (1) X has the same or lower cost, or returns the same or fewer rows, -** than Y. -** (2) X uses fewer WHERE clause terms than Y -** (3) Every WHERE clause term used by X is also used by Y -** (4) X skips at least as many columns as Y -** (5) If X is a covering index, than Y is too +** (1a) X and Y use the same index. +** (1b) X has fewer == terms than Y +** (1c) Neither X nor Y use skip-scan +** (1d) X does not have a greater cost than Y ** -** Conditions (2) and (3) mean that X is a "proper subset" of Y.
-If X is a proper subset of Y then Y is a better choice and ought -** to have a lower cost. This routine returns TRUE when that cost -** relationship is inverted and needs to be adjusted. Constraint (4) -** was added because if X uses skip-scan less than Y it still might -** deserve a lower cost even if it is a proper subset of Y. Constraint (5) -** was added because a covering index probably deserves to have a lower cost -** than a non-covering index even if it is a proper subset. +** Case 2: +** +** (2a) X has the same or lower cost, or returns the same or fewer rows, +** than Y. +** (2b) X uses fewer WHERE clause terms than Y +** (2c) Every WHERE clause term used by X is also used by Y +** (2d) X skips at least as many columns as Y +** (2e) If X is a covering index, then Y is too */ static int whereLoopCheaperProperSubset( const WhereLoop *pX, /* First WhereLoop to compare */ const WhereLoop *pY /* Compare against this WhereLoop */ ){ int i, j; + if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0; /* (1d) and (2a) */ + assert( (pX->wsFlags & WHERE_VIRTUALTABLE)==0 ); + assert( (pY->wsFlags & WHERE_VIRTUALTABLE)==0 ); + if( pX->u.btree.nEq < pY->u.btree.nEq /* (1b) */ + && pX->u.btree.pIndex==pY->u.btree.pIndex /* (1a) */ + && pX->nSkip==0 && pY->nSkip==0 /* (1c) */ + ){ + return 1; /* Case 1 is true */ + } if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){ - return 0; /* X is not a subset of Y */ + return 0; /* (2b) */ } - if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0; - if( pY->nSkip > pX->nSkip ) return 0; + if( pY->nSkip > pX->nSkip ) return 0; /* (2d) */ for(i=pX->nLTerm-1; i>=0; i--){ if( pX->aLTerm[i]==0 ) continue; for(j=pY->nLTerm-1; j>=0; j--){ if( pY->aLTerm[j]==pX->aLTerm[i] ) break; } - if( j<0 ) return 0; /* X not a subset of Y since term X[i] not used by Y */ + if( j<0 ) return 0; /* (2c) */ } if( (pX->wsFlags&WHERE_IDX_ONLY)!=0 && (pY->wsFlags&WHERE_IDX_ONLY)==0 ){ - return 0; /* Constraint (5) */ + return 0; /* (2e) */ } - return 1; /* All conditions meet */ + return 1; /* Case 2 is true */ } /* @@ -163171,7 +163743,10 @@ static int whereLoopAddBtreeIndex( assert( pNew->u.btree.nBtm==0 ); opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS; } - if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); + if( pProbe->bUnordered || pProbe->bLowQual ){ + if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); + if( pProbe->bLowQual ) opMask &= ~(WO_EQ|WO_IN|WO_IS); + } assert( pNew->u.btree.nEq<pProbe->nColumn ); assert( pNew->u.btree.nEq<pProbe->nKeyCol @@ -166059,6 +166634,20 @@ static SQLITE_NOINLINE void whereAddIndexedExpr( continue; } if( sqlite3ExprIsConstant(pExpr) ) continue; + if( pExpr->op==TK_FUNCTION ){ + /* Functions that might set a subtype should not be replaced by the + ** value taken from an expression index since the index omits the + ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */ + int n; + FuncDef *pDef; + sqlite3 *db = pParse->db; + assert( ExprUseXList(pExpr) ); + n = pExpr->x.pList ?
pExpr->x.pList->nExpr : 0; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ + continue; + } + } p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr)); if( p==0 ) break; p->pIENext = pParse->pIdxEpr; @@ -166237,7 +166826,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( /* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */ testcase( pOrderBy && pOrderBy->nExpr==BMS-1 ); - if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0; + if( pOrderBy && pOrderBy->nExpr>=BMS ){ + pOrderBy = 0; + wctrlFlags &= ~WHERE_WANT_DISTINCT; + } /* The number of tables in the FROM clause is limited by the number of ** bits in a Bitmask @@ -166262,7 +166854,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ** field (type Bitmask) it must be aligned on an 8-byte boundary on ** some architectures. Hence the ROUND8() below. */ - nByteWInfo = ROUND8P(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel)); + nByteWInfo = ROUND8P(sizeof(WhereInfo)); + if( nTabList>1 ){ + nByteWInfo = ROUND8P(nByteWInfo + (nTabList-1)*sizeof(WhereLevel)); + } pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop)); if( db->mallocFailed ){ sqlite3DbFree(db, pWInfo); @@ -166824,6 +167419,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( pParse->nQueryLoop = pWInfo->savedNQueryLoop; whereInfoFree(db, pWInfo); } +#ifdef WHERETRACE_ENABLED + /* Prevent harmless compiler warnings about debugging routines + ** being declared but never used */ + sqlite3ShowWhereLoopList(0); +#endif /* WHERETRACE_ENABLED */ return 0; } @@ -168241,7 +168841,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ assert( ExprUseXList(pWin->pOwner) ); assert( pWin->pWFunc!=0 ); pArgs = pWin->pOwner->x.pList; - if( pWin->pWFunc->funcFlags & SQLITE_FUNC_SUBTYPE ){ + if( pWin->pWFunc->funcFlags & SQLITE_SUBTYPE ){ selectWindowRewriteEList(pParse, pMWin, pSrc, pArgs, pTab, &pSublist); pWin->iArgCol = (pSublist ? pSublist->nExpr : 0); pWin->bExprArgs = 1; @@ -179415,7 +180015,7 @@ SQLITE_PRIVATE int sqlite3CreateFunc( assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC ); assert( SQLITE_FUNC_DIRECT==SQLITE_DIRECTONLY ); extraFlags = enc & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY| - SQLITE_SUBTYPE|SQLITE_INNOCUOUS); + SQLITE_SUBTYPE|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE); enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY); /* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. But @@ -182160,6 +182760,28 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } #endif + + /* sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff); + ** + ** Activate or deactivate validation of JSONB that is generated from + ** text. Off by default, as the validation is slow. Validation is + ** only available if compiled using SQLITE_DEBUG. + ** + ** If onOff is initially 1, then turn it on. If onOff is initially + ** off, turn it off. If onOff is initially -1, then change onOff + ** to be the current setting. 
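/* [Editor's note -- usage sketch, not part of the vendored diff] The test control described in the comment above can be driven as shown below, per the handler that follows; it is only meaningful in builds compiled with SQLITE_DEBUG.
*/
#include <sqlite3.h>

static int jsonSelfcheckCurrent(void){
  int onOff = -1;            /* negative: just report the current setting */
  sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff);
  return onOff;
}
static void jsonSelfcheckEnable(int enable){
  int onOff = (enable!=0);   /* 0 or 1: change the setting */
  sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff);
}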
+ */ + case SQLITE_TESTCTRL_JSON_SELFCHECK: { +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD) + int *pOnOff = va_arg(ap, int*); + if( *pOnOff<0 ){ + *pOnOff = sqlite3Config.bJsonSelfcheck; + }else{ + sqlite3Config.bJsonSelfcheck = (u8)((*pOnOff)&0xff); + } +#endif + break; + } } va_end(ap); #endif /* SQLITE_UNTESTABLE */ @@ -184144,6 +184766,8 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int); SQLITE_PRIVATE int sqlite3Fts3ExprIterate(Fts3Expr*, int (*x)(Fts3Expr*,int,void*), void*); +SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk); + #endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */ #endif /* _FTSINT_H */ @@ -187866,7 +188490,7 @@ static int fts3ShadowName(const char *zName){ ** Implementation of the xIntegrity() method on the FTS3/FTS4 virtual ** table. */ -static int fts3Integrity( +static int fts3IntegrityMethod( sqlite3_vtab *pVtab, /* The virtual table to be checked */ const char *zSchema, /* Name of schema in which pVtab lives */ const char *zTabname, /* Name of the pVTab table */ @@ -187874,30 +188498,21 @@ static int fts3Integrity( char **pzErr /* Write error message here */ ){ Fts3Table *p = (Fts3Table*)pVtab; - char *zSql; int rc; - char *zErr = 0; + int bOk = 0; - assert( pzErr!=0 ); - assert( *pzErr==0 ); UNUSED_PARAMETER(isQuick); - zSql = sqlite3_mprintf( - "INSERT INTO \"%w\".\"%w\"(\"%w\") VALUES('integrity-check');", - zSchema, zTabname, zTabname); - if( zSql==0 ){ - return SQLITE_NOMEM; - } - rc = sqlite3_exec(p->db, zSql, 0, 0, &zErr); - sqlite3_free(zSql); - if( (rc&0xff)==SQLITE_CORRUPT ){ - *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s", - p->bFts4 ? 4 : 3, zSchema, zTabname); - }else if( rc!=SQLITE_OK ){ + rc = sqlite3Fts3IntegrityCheck(p, &bOk); + assert( rc!=SQLITE_CORRUPT_VTAB || bOk==0 ); + if( rc!=SQLITE_OK && rc!=SQLITE_CORRUPT_VTAB ){ *pzErr = sqlite3_mprintf("unable to validate the inverted index for" " FTS%d table %s.%s: %s", - p->bFts4 ? 4 : 3, zSchema, zTabname, zErr); + p->bFts4 ? 4 : 3, zSchema, zTabname, sqlite3_errstr(rc)); + }else if( bOk==0 ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s", + p->bFts4 ? 4 : 3, zSchema, zTabname); } - sqlite3_free(zErr); + sqlite3Fts3SegmentsClose(p); return SQLITE_OK; } @@ -187928,7 +188543,7 @@ static const sqlite3_module fts3Module = { /* xRelease */ fts3ReleaseMethod, /* xRollbackTo */ fts3RollbackToMethod, /* xShadowName */ fts3ShadowName, - /* xIntegrity */ fts3Integrity, + /* xIntegrity */ fts3IntegrityMethod, }; /* @@ -199482,7 +200097,7 @@ static u64 fts3ChecksumIndex( ** If an error occurs (e.g. an OOM or IO error), return an SQLite error ** code. The final value of *pbOk is undefined in this case. 
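/* [Editor's note -- hedged sketch, not part of the vendored diff] With the xIntegrity method wired up above, an ordinary "PRAGMA integrity_check" also validates FTS3/FTS4 inverted indexes; any "malformed inverted index ..." message surfaces as a normal result row. zPath below is a placeholder.
*/
#include <stdio.h>
#include <sqlite3.h>

static int printRow(void *pUnused, int nCol, char **azVal, char **azCol){
  (void)pUnused; (void)nCol; (void)azCol;
  printf("%s\n", azVal[0] ? azVal[0] : "NULL");  /* "ok" or an error report */
  return 0;
}
static void checkDatabase(const char *zPath){
  sqlite3 *db = 0;
  if( sqlite3_open(zPath, &db)==SQLITE_OK ){
    sqlite3_exec(db, "PRAGMA integrity_check;", printRow, 0, 0);
  }
  sqlite3_close(db);
}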
*/ -static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){ +SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk){ int rc = SQLITE_OK; /* Return code */ u64 cksum1 = 0; /* Checksum based on FTS index contents */ u64 cksum2 = 0; /* Checksum based on %_content contents */ @@ -199560,7 +200175,7 @@ static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){ sqlite3_finalize(pStmt); } - *pbOk = (cksum1==cksum2); + *pbOk = (rc==SQLITE_OK && cksum1==cksum2); return rc; } @@ -199600,7 +200215,7 @@ static int fts3DoIntegrityCheck( ){ int rc; int bOk = 0; - rc = fts3IntegrityCheck(p, &bOk); + rc = sqlite3Fts3IntegrityCheck(p, &bOk); if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB; return rc; } @@ -202574,24 +203189,145 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** ****************************************************************************** ** -** This SQLite JSON functions. +** SQLite JSON functions. ** ** This file began as an extension in ext/misc/json1.c in 2015. That ** extension proved so useful that it has now been moved into the core. ** -** For the time being, all JSON is stored as pure text. (We might add -** a JSONB type in the future which stores a binary encoding of JSON in -** a BLOB, but there is no support for JSONB in the current implementation. -** This implementation parses JSON text at 250 MB/s, so it is hard to see -** how JSONB might improve on that.) +** The original design stored all JSON as pure text, canonical RFC-8259. +** Support for JSON-5 extensions was added with version 3.42.0 (2023-05-16). +** All generated JSON text still conforms strictly to RFC-8259, but text +** with JSON-5 extensions is accepted as input. +** +** Beginning with version 3.45.0 (circa 2024-01-01), these routines also +** accept BLOB values that hold JSON encoded using a binary representation +** called "JSONB". The name JSONB comes from PostgreSQL; however, the on-disk +** format of SQLite JSONB is completely different from and incompatible with +** PostgreSQL JSONB. +** +** Decoding and interpreting JSONB is still O(N) where N is the size of +** the input, the same as text JSON. However, the constant of proportionality +** for JSONB is much smaller due to faster parsing. The size of each +** element in JSONB is encoded in its header, so there is no need to search +** for delimiters using persnickety syntax rules. JSONB seems to be about +** 3x faster than text JSON as a result. JSONB also tends to be slightly +** smaller than text JSON, by 5% or 10%, but there are corner cases where +** JSONB can be slightly larger. So you are not far mistaken to say that +** a JSONB blob is the same size as the equivalent RFC-8259 text. +** +** +** THE JSONB ENCODING: +** +** Every JSON element is encoded in JSONB as a header and a payload. +** The header is between 1 and 9 bytes in size. The payload is zero +** or more bytes. +** +** The lower 4 bits of the first byte of the header determine the +** element type: +** +** 0: NULL +** 1: TRUE +** 2: FALSE +** 3: INT -- RFC-8259 integer literal +** 4: INT5 -- JSON5 integer literal +** 5: FLOAT -- RFC-8259 floating point literal +** 6: FLOAT5 -- JSON5 floating point literal +** 7: TEXT -- Text literal acceptable to both SQL and JSON +** 8: TEXTJ -- Text containing RFC-8259 escapes +** 9: TEXT5 -- Text containing JSON5 and/or RFC-8259 escapes +** 10: TEXTRAW -- Text containing unescaped syntax characters +** 11: ARRAY +** 12: OBJECT +** +** The other three possible values (13-15) are reserved for future +** enhancements.
+** +** The upper 4 bits of the first byte determine the size of the header +** and sometimes also the size of the payload. If X is the first byte +** of the element and if X>>4 is between 0 and 11, then the payload +** will be that many bytes in size and the header is exactly one byte +** in size. The other four values for X>>4 (12-15) indicate that the header +** is more than one byte in size and that the payload size is determined +** by the remainder of the header, interpreted as an unsigned big-endian +** integer. +** +** Value of X>>4 Size integer Total header size +** ------------- -------------------- ----------------- +** 12 1 byte (0-255) 2 +** 13 2 byte (0-65535) 3 +** 14 4 byte (0-4294967295) 5 +** 15 8 byte (0-1.8e19) 9 +** +** The payload size need not be expressed in its minimal form. For example, +** if the payload size is 10, the size can be expressed in any of 5 different +** ways: (1) (X>>4)==10, (2) (X>>4)==12 followed by one 0x0a byte, +** (3) (X>>4)==13 followed by 0x00 and 0x0a, (4) (X>>4)==14 followed by +** 0x00 0x00 0x00 0x0a, or (5) (X>>4)==15 followed by 7 bytes of 0x00 and +** a single byte of 0x0a. The shorter forms are preferred, of course, but +** sometimes when generating JSONB, the payload size is not known in advance +** and it is convenient to reserve sufficient header space to cover the +** largest possible payload size and then come back later and patch up +** the size when it becomes known, resulting in a non-minimal encoding. +** +** The value (X>>4)==15 is not actually used in the current implementation +** (as SQLite is currently unable to handle BLOBs larger than about 2GB) +** but is included in the design to allow for future enhancements. +** +** The payload follows the header. NULL, TRUE, and FALSE have no payload and +** their payload size must always be zero. The payload for INT, INT5, +** FLOAT, FLOAT5, TEXT, TEXTJ, TEXT5, and TEXTRAW is text. Note that the +** "..." or '...' delimiters are omitted from the various text encodings. +** The payload for ARRAY and OBJECT is a list of additional elements that +** are the content for the array or object. The payload for an OBJECT +** must be an even number of elements. The first element of each pair is +** the label and must be of type TEXT, TEXTJ, TEXT5, or TEXTRAW. +** +** A valid JSONB blob consists of a single element, as described above. +** Usually this will be an ARRAY or OBJECT element which has many more +** elements as its content. But the overall blob is just a single element. +** +** Input validation for JSONB blobs simply checks that the element type +** code is between 0 and 12 and that the total size of the element +** (header plus payload) is the same as the size of the BLOB. If those +** checks are true, the BLOB is assumed to be JSONB and processing continues. +** Errors are only raised if some other miscoding is discovered during +** processing. +** +** Additional information can be found in the doc/jsonb.md file of the +** canonical SQLite source tree.
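/* [Editor's note -- decoding sketch derived from the format description above; editorial illustration, not upstream code] Reading one JSONB element header: the function returns the header size in bytes, or 0 if the buffer is too short or names a reserved type. Hand-encoding by the same rules, the JSON value [1] comes out as the three bytes 0x2b 0x13 0x31: an ARRAY with a 2-byte payload ((2<<4)|11 == 0x2b) whose payload is an INT with a 1-byte payload ((1<<4)|3 == 0x13) holding the single character '1' (0x31).
*/
#include <stdint.h>
#include <stddef.h>

static size_t jsonbReadHeader(const uint8_t *a, size_t n,
                              int *pType, uint64_t *pPayloadSz){
  size_t i, nSzByte;
  if( n==0 ) return 0;
  *pType = a[0] & 0x0f;                     /* lower nibble: element type */
  if( *pType>12 ) return 0;                 /* 13..15 are reserved */
  if( (a[0]>>4)<=11 ){                      /* size fits in the upper nibble */
    *pPayloadSz = a[0]>>4;
    return 1;
  }
  nSzByte = (size_t)1 << ((a[0]>>4)-12);    /* 12,13,14,15 -> 1,2,4,8 bytes */
  if( n < 1+nSzByte ) return 0;
  *pPayloadSz = 0;
  for(i=0; i<nSzByte; i++){
    *pPayloadSz = (*pPayloadSz<<8) | a[1+i];  /* big-endian size integer */
  }
  return 1+nSzByte;
}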
*/ #ifndef SQLITE_OMIT_JSON /* #include "sqliteInt.h" */ +/* JSONB element types +*/ +#define JSONB_NULL 0 /* "null" */ +#define JSONB_TRUE 1 /* "true" */ +#define JSONB_FALSE 2 /* "false" */ +#define JSONB_INT 3 /* integer acceptable to JSON and SQL */ +#define JSONB_INT5 4 /* integer in 0x000 notation */ +#define JSONB_FLOAT 5 /* float acceptable to JSON and SQL */ +#define JSONB_FLOAT5 6 /* float with JSON5 extensions */ +#define JSONB_TEXT 7 /* Text compatible with both JSON and SQL */ +#define JSONB_TEXTJ 8 /* Text with JSON escapes */ +#define JSONB_TEXT5 9 /* Text with JSON-5 escape */ +#define JSONB_TEXTRAW 10 /* SQL text that needs escaping for JSON */ +#define JSONB_ARRAY 11 /* An array */ +#define JSONB_OBJECT 12 /* An object */ + +/* Human-readable names for the JSONB values. The index for each +** string must correspond to the JSONB_* integer above. +*/ +static const char * const jsonbType[] = { + "null", "true", "false", "integer", "integer", + "real", "real", "text", "text", "text", + "text", "array", "object", "", "", "", "" +}; + /* ** Growing our own isspace() routine this way is twice as fast as ** the library isspace() function, resulting in a 7% overall performance -** increase for the parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os). +** increase for the text-JSON parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os). */ static const char jsonIsSpace[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, @@ -202612,11 +203348,19 @@ static const char jsonIsSpace[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; -#define fast_isspace(x) (jsonIsSpace[(unsigned char)x]) +#define jsonIsspace(x) (jsonIsSpace[(unsigned char)x]) /* -** Characters that are special to JSON. Control charaters, -** '"' and '\\'. +** The set of all space characters recognized by jsonIsspace(). +** Useful as the second argument to strspn(). +*/ +static const char jsonSpaces[] = "\011\012\015\040"; + +/* +** Characters that are special to JSON. Control characters, +** '"' and '\\' and '\''. Actually, '\'' is not special to +** canonical JSON, but it is special in JSON-5, so we include +** it in the set of special characters. */ static const char jsonIsOk[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -202638,22 +203382,49 @@ static const char jsonIsOk[256] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; - -#if !defined(SQLITE_DEBUG) && !defined(SQLITE_COVERAGE_TEST) -# define VVA(X) -#else -# define VVA(X) X -#endif - /* Objects */ +typedef struct JsonCache JsonCache; typedef struct JsonString JsonString; -typedef struct JsonNode JsonNode; typedef struct JsonParse JsonParse; -typedef struct JsonCleanup JsonCleanup; + +/* +** Magic number used for the JSON parse cache in sqlite3_get_auxdata() +*/ +#define JSON_CACHE_ID (-429938) /* Cache entry */ +#define JSON_CACHE_SIZE 4 /* Max number of cache entries */ + +/* +** jsonUnescapeOneChar() returns this invalid code point if it encounters +** a syntax error. +*/ +#define JSON_INVALID_CHAR 0x99999 + +/* A cache mapping JSON text into JSONB blobs. +** +** Each cache entry is a JsonParse object with the following restrictions: +** +** * The bReadOnly flag must be set +** +** * The aBlob[] array must be owned by the JsonParse object. In other +** words, nBlobAlloc must be non-zero. +** +** * eEdit and delta must be zero. +** +** * zJson must be an RCStr. In other words bJsonIsRCStr must be true. 
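/* [Editor's note -- background sketch, not part of the vendored diff] The JsonCache introduced here rides on SQLite's auxiliary-data mechanism, which lets a SQL function implementation stash a value on a prepared statement and have SQLite free it when it is no longer needed (at the latest when the statement is finalized). The generic shape of that pattern, with hypothetical expensiveCompile()/expensiveFree() helpers:
*/
#include <sqlite3.h>

extern void *expensiveCompile(const unsigned char *zSpec);  /* hypothetical */
extern void expensiveFree(void *pCompiled);                 /* hypothetical */

static void myFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  void *pAux = sqlite3_get_auxdata(ctx, 0);   /* reuse across rows if cached */
  (void)argc;
  if( pAux==0 ){
    pAux = expensiveCompile(sqlite3_value_text(argv[0]));
    if( pAux==0 ){ sqlite3_result_error_nomem(ctx); return; }
    /* SQLite now owns pAux and will call expensiveFree() on it */
    sqlite3_set_auxdata(ctx, 0, pAux, expensiveFree);
    pAux = sqlite3_get_auxdata(ctx, 0);  /* re-fetch: the set can fail on OOM */
    if( pAux==0 ){ sqlite3_result_error_nomem(ctx); return; }
  }
  /* ... compute a result using pAux ... */
}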
+*/ +struct JsonCache { + sqlite3 *db; /* Database connection */ + int nUsed; /* Number of active entries in the cache */ + JsonParse *a[JSON_CACHE_SIZE]; /* One line for each cache entry */ +}; /* An instance of this object represents a JSON string ** under construction. Really, this is a generic string accumulator ** that can be and is used to create strings other than JSON. +** +** If the generated string is longer than will fit into the zSpace[] buffer, +** then it will be an RCStr string. This aids with caching of large +** JSON strings. */ struct JsonString { sqlite3_context *pCtx; /* Function context - put error messages here */ @@ -202661,121 +203432,75 @@ struct JsonString { u64 nAlloc; /* Bytes of storage available in zBuf[] */ u64 nUsed; /* Bytes of zBuf[] currently used */ u8 bStatic; /* True if zBuf is static space */ - u8 bErr; /* True if an error has been encountered */ + u8 eErr; /* True if an error has been encountered */ char zSpace[100]; /* Initial static space */ }; -/* A deferred cleanup task. A list of JsonCleanup objects might be -** run when the JsonParse object is destroyed. -*/ -struct JsonCleanup { - JsonCleanup *pJCNext; /* Next in a list */ - void (*xOp)(void*); /* Routine to run */ - void *pArg; /* Argument to xOp() */ -}; +/* Allowed values for JsonString.eErr */ +#define JSTRING_OOM 0x01 /* Out of memory */ +#define JSTRING_MALFORMED 0x02 /* Malformed JSONB */ +#define JSTRING_ERR 0x04 /* Error already sent to sqlite3_result */ -/* JSON type values +/* The "subtype" set for text JSON values passed through using +** sqlite3_result_subtype() and sqlite3_value_subtype(). */ -#define JSON_SUBST 0 /* Special edit node. Uses u.iPrev */ -#define JSON_NULL 1 -#define JSON_TRUE 2 -#define JSON_FALSE 3 -#define JSON_INT 4 -#define JSON_REAL 5 -#define JSON_STRING 6 -#define JSON_ARRAY 7 -#define JSON_OBJECT 8 - -/* The "subtype" set for JSON values */ #define JSON_SUBTYPE 74 /* Ascii for "J" */ /* -** Names of the various JSON types: +** Bit values for the flags passed into various SQL function implementations +** via the sqlite3_user_data() value. */ -static const char * const jsonType[] = { - "subst", - "null", "true", "false", "integer", "real", "text", "array", "object" -}; - -/* Bit values for the JsonNode.jnFlag field -*/ -#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */ -#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */ -#define JNODE_REMOVE 0x04 /* Do not output */ -#define JNODE_REPLACE 0x08 /* Target of a JSON_SUBST node */ -#define JNODE_APPEND 0x10 /* More ARRAY/OBJECT entries at u.iAppend */ -#define JNODE_LABEL 0x20 /* Is a label of an object */ -#define JNODE_JSON5 0x40 /* Node contains JSON5 enhancements */ - - -/* A single node of parsed JSON. An array of these nodes describes -** a parse of JSON + edits. -** -** Use the json_parse() SQL function (available when compiled with -** -DSQLITE_DEBUG) to see a dump of complete JsonParse objects, including -** a complete listing and decoding of the array of JsonNodes. 
-*/ -struct JsonNode { - u8 eType; /* One of the JSON_ type values */ - u8 jnFlags; /* JNODE flags */ - u8 eU; /* Which union element to use */ - u32 n; /* Bytes of content for INT, REAL or STRING - ** Number of sub-nodes for ARRAY and OBJECT - ** Node that SUBST applies to */ - union { - const char *zJContent; /* 1: Content for INT, REAL, and STRING */ - u32 iAppend; /* 2: More terms for ARRAY and OBJECT */ - u32 iKey; /* 3: Key for ARRAY objects in json_tree() */ - u32 iPrev; /* 4: Previous SUBST node, or 0 */ - } u; -}; +#define JSON_JSON 0x01 /* Result is always JSON */ +#define JSON_SQL 0x02 /* Result is always SQL */ +#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */ +#define JSON_ISSET 0x04 /* json_set(), not json_insert() */ +#define JSON_BLOB 0x08 /* Use the BLOB output format */ -/* A parsed and possibly edited JSON string. Lifecycle: -** -** 1. JSON comes in and is parsed into an array aNode[]. The original -** JSON text is stored in zJson. +/* A parsed JSON value. Lifecycle: ** -** 2. Zero or more changes are made (via json_remove() or json_replace() -** or similar) to the aNode[] array. +** 1. JSON comes in and is parsed into a JSONB value in aBlob. The +** original text is stored in zJson. This step is skipped if the +** input is JSONB instead of text JSON. ** -** 3. A new, edited and mimified JSON string is generated from aNode -** and stored in zAlt. The JsonParse object always owns zAlt. +** 2. The aBlob[] array is searched using the JSON path notation, if needed. ** -** Step 1 always happens. Step 2 and 3 may or may not happen, depending -** on the operation. +** 3. Zero or more changes are made to aBlob[] (via json_remove() or +** json_replace() or json_patch() or similar). ** -** aNode[].u.zJContent entries typically point into zJson. Hence zJson -** must remain valid for the lifespan of the parse. For edits, -** aNode[].u.zJContent might point to malloced space other than zJson. -** Entries in pClup are responsible for freeing that extra malloced space. -** -** When walking the parse tree in aNode[], edits are ignored if useMod is -** false. +** 4. New JSON text is generated from the aBlob[] for output. This step +** is skipped if the function is one of the jsonb_* functions that +** returns JSONB instead of text JSON. */ struct JsonParse { - u32 nNode; /* Number of slots of aNode[] used */ - u32 nAlloc; /* Number of slots of aNode[] allocated */ - JsonNode *aNode; /* Array of nodes containing the parse */ - char *zJson; /* Original JSON string (before edits) */ - char *zAlt; /* Revised and/or mimified JSON */ - u32 *aUp; /* Index of parent of each node */ - JsonCleanup *pClup;/* Cleanup operations prior to freeing this object */ + u8 *aBlob; /* JSONB representation of JSON value */ + u32 nBlob; /* Bytes of aBlob[] actually used */ + u32 nBlobAlloc; /* Bytes allocated to aBlob[]. 
0 if aBlob is external */ + char *zJson; /* Json text used for parsing */ + sqlite3 *db; /* The database connection to which this object belongs */ + int nJson; /* Length of the zJson string in bytes */ + u32 nJPRef; /* Number of references to this object */ + u32 iErr; /* Error location in zJson[] */ u16 iDepth; /* Nesting depth */ u8 nErr; /* Number of errors seen */ u8 oom; /* Set to true if out of memory */ u8 bJsonIsRCStr; /* True if zJson is an RCStr */ u8 hasNonstd; /* True if input uses non-standard features like JSON5 */ - u8 useMod; /* Actually use the edits contain inside aNode */ - u8 hasMod; /* aNode contains edits from the original zJson */ - u32 nJPRef; /* Number of references to this object */ - int nJson; /* Length of the zJson string in bytes */ - int nAlt; /* Length of alternative JSON string zAlt, in bytes */ - u32 iErr; /* Error location in zJson[] */ - u32 iSubst; /* Last JSON_SUBST entry in aNode[] */ - u32 iHold; /* Age of this entry in the cache for LRU replacement */ + u8 bReadOnly; /* Do not modify. */ + /* Search and edit information. See jsonLookupStep() */ + u8 eEdit; /* Edit operation to apply */ + int delta; /* Size change due to the edit */ + u32 nIns; /* Number of bytes to insert */ + u32 iLabel; /* Location of label if search landed on an object value */ + u8 *aIns; /* Content to be inserted */ }; +/* Allowed values for JsonParse.eEdit */ +#define JEDIT_DEL 1 /* Delete if exists */ +#define JEDIT_REPL 2 /* Overwrite if exists */ +#define JEDIT_INS 3 /* Insert if not exists */ +#define JEDIT_SET 4 /* Insert or overwrite */ + /* ** Maximum nesting depth of JSON for this implementation. ** @@ -202783,15 +203508,151 @@ struct JsonParse { ** descent parser. A depth of 1000 is far deeper than any sane JSON ** should go. Historical note: This limit was 2000 prior to version 3.42.0 */ -#define JSON_MAX_DEPTH 1000 +#ifndef SQLITE_JSON_MAX_DEPTH +# define JSON_MAX_DEPTH 1000 +#else +# define JSON_MAX_DEPTH SQLITE_JSON_MAX_DEPTH +#endif + +/* +** Allowed values for the flgs argument to jsonParseFuncArg(); +*/ +#define JSON_EDITABLE 0x01 /* Generate a writable JsonParse object */ +#define JSON_KEEPERROR 0x02 /* Return non-NULL even if there is an error */ + +/************************************************************************** +** Forward references +**************************************************************************/ +static void jsonReturnStringAsBlob(JsonString*); +static int jsonFuncArgMightBeBinary(sqlite3_value *pJson); +static u32 jsonTranslateBlobToText(const JsonParse*,u32,JsonString*); +static void jsonReturnParse(sqlite3_context*,JsonParse*); +static JsonParse *jsonParseFuncArg(sqlite3_context*,sqlite3_value*,u32); +static void jsonParseFree(JsonParse*); +static u32 jsonbPayloadSize(const JsonParse*, u32, u32*); +static u32 jsonUnescapeOneChar(const char*, u32, u32*); + +/************************************************************************** +** Utility routines for dealing with JsonCache objects +**************************************************************************/ + +/* +** Free a JsonCache object. +*/ +static void jsonCacheDelete(JsonCache *p){ + int i; + for(i=0; inUsed; i++){ + jsonParseFree(p->a[i]); + } + sqlite3DbFree(p->db, p); +} +static void jsonCacheDeleteGeneric(void *p){ + jsonCacheDelete((JsonCache*)p); +} + +/* +** Insert a new entry into the cache. If the cache is full, expel +** the least recently used entry. Return SQLITE_OK on success or a +** result code otherwise. 
+** +** Cache entries are stored in age order, oldest first. +*/ +static int jsonCacheInsert( + sqlite3_context *ctx, /* The SQL statement context holding the cache */ + JsonParse *pParse /* The parse object to be added to the cache */ +){ + JsonCache *p; + + assert( pParse->zJson!=0 ); + assert( pParse->bJsonIsRCStr ); + assert( pParse->delta==0 ); + p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID); + if( p==0 ){ + sqlite3 *db = sqlite3_context_db_handle(ctx); + p = sqlite3DbMallocZero(db, sizeof(*p)); + if( p==0 ) return SQLITE_NOMEM; + p->db = db; + sqlite3_set_auxdata(ctx, JSON_CACHE_ID, p, jsonCacheDeleteGeneric); + p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID); + if( p==0 ) return SQLITE_NOMEM; + } + if( p->nUsed >= JSON_CACHE_SIZE ){ + jsonParseFree(p->a[0]); + memmove(p->a, &p->a[1], (JSON_CACHE_SIZE-1)*sizeof(p->a[0])); + p->nUsed = JSON_CACHE_SIZE-1; + } + assert( pParse->nBlobAlloc>0 ); + pParse->eEdit = 0; + pParse->nJPRef++; + pParse->bReadOnly = 1; + p->a[p->nUsed] = pParse; + p->nUsed++; + return SQLITE_OK; +} + +/* +** Search for a cached translation of the JSON text supplied by pArg. Return +** the JsonParse object if found. Return NULL if not found. +** +** When a match is found, the matching entry is moved to become the +** most-recently used entry if it isn't so already. +** +** The JsonParse object returned still belongs to the Cache and might +** be deleted at any moment. If the caller wants the JsonParse to +** linger, it needs to increment the nJPRef reference counter. +*/ +static JsonParse *jsonCacheSearch( + sqlite3_context *ctx, /* The SQL statement context holding the cache */ + sqlite3_value *pArg /* Function argument containing SQL text */ +){ + JsonCache *p; + int i; + const char *zJson; + int nJson; + + if( sqlite3_value_type(pArg)!=SQLITE_TEXT ){ + return 0; + } + zJson = (const char*)sqlite3_value_text(pArg); + if( zJson==0 ) return 0; + nJson = sqlite3_value_bytes(pArg); + + p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID); + if( p==0 ){ + return 0; + } + for(i=0; i<p->nUsed; i++){ + if( p->a[i]->zJson==zJson ) break; + } + if( i>=p->nUsed ){ + for(i=0; i<p->nUsed; i++){ + if( p->a[i]->nJson!=nJson ) continue; + if( memcmp(p->a[i]->zJson, zJson, nJson)==0 ) break; + } + } + if( i<p->nUsed ){ + if( i<p->nUsed-1 ){ + /* Make the matching entry the most recently used entry */ + JsonParse *tmp = p->a[i]; + memmove(&p->a[i], &p->a[i+1], (p->nUsed-i-1)*sizeof(tmp)); + p->a[p->nUsed-1] = tmp; + i = p->nUsed - 1; + } + assert( p->a[i]->delta==0 ); + return p->a[i]; + }else{ + return 0; + } +} /************************************************************************** ** Utility routines for dealing with JsonString objects **************************************************************************/ -/* Set the JsonString object to an empty string +/* Turn uninitialized bulk memory into a valid JsonString object +** holding a zero-length string. */ -static void jsonZero(JsonString *p){ +static void jsonStringZero(JsonString *p){ p->zBuf = p->zSpace; p->nAlloc = sizeof(p->zSpace); p->nUsed = 0; @@ -202800,39 +203661,39 @@ static void jsonZero(JsonString *p){ /* Initialize the JsonString object */ -static void jsonInit(JsonString *p, sqlite3_context *pCtx){ +static void jsonStringInit(JsonString *p, sqlite3_context *pCtx){ p->pCtx = pCtx; - p->bErr = 0; - jsonZero(p); + p->eErr = 0; + jsonStringZero(p); } /* Free all allocated memory and reset the JsonString object back to its ** initial state.
*/ -static void jsonReset(JsonString *p){ +static void jsonStringReset(JsonString *p){ if( !p->bStatic ) sqlite3RCStrUnref(p->zBuf); - jsonZero(p); + jsonStringZero(p); } /* Report an out-of-memory (OOM) condition */ -static void jsonOom(JsonString *p){ - p->bErr = 1; - sqlite3_result_error_nomem(p->pCtx); - jsonReset(p); +static void jsonStringOom(JsonString *p){ + p->eErr |= JSTRING_OOM; + if( p->pCtx ) sqlite3_result_error_nomem(p->pCtx); + jsonStringReset(p); } /* Enlarge pJson->zBuf so that it can hold at least N more bytes. ** Return zero on success. Return non-zero on an OOM error */ -static int jsonGrow(JsonString *p, u32 N){ +static int jsonStringGrow(JsonString *p, u32 N){ u64 nTotal = NnAlloc ? p->nAlloc*2 : p->nAlloc+N+10; char *zNew; if( p->bStatic ){ - if( p->bErr ) return 1; + if( p->eErr ) return 1; zNew = sqlite3RCStrNew(nTotal); if( zNew==0 ){ - jsonOom(p); + jsonStringOom(p); return SQLITE_NOMEM; } memcpy(zNew, p->zBuf, (size_t)p->nUsed); @@ -202841,8 +203702,8 @@ static int jsonGrow(JsonString *p, u32 N){ }else{ p->zBuf = sqlite3RCStrResize(p->zBuf, nTotal); if( p->zBuf==0 ){ - p->bErr = 1; - jsonZero(p); + p->eErr |= JSTRING_OOM; + jsonStringZero(p); return SQLITE_NOMEM; } } @@ -202852,20 +203713,20 @@ static int jsonGrow(JsonString *p, u32 N){ /* Append N bytes from zIn onto the end of the JsonString string. */ -static SQLITE_NOINLINE void jsonAppendExpand( +static SQLITE_NOINLINE void jsonStringExpandAndAppend( JsonString *p, const char *zIn, u32 N ){ assert( N>0 ); - if( jsonGrow(p,N) ) return; + if( jsonStringGrow(p,N) ) return; memcpy(p->zBuf+p->nUsed, zIn, N); p->nUsed += N; } static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){ if( N==0 ) return; if( N+p->nUsed >= p->nAlloc ){ - jsonAppendExpand(p,zIn,N); + jsonStringExpandAndAppend(p,zIn,N); }else{ memcpy(p->zBuf+p->nUsed, zIn, N); p->nUsed += N; @@ -202874,7 +203735,7 @@ static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){ static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){ assert( N>0 ); if( N+p->nUsed >= p->nAlloc ){ - jsonAppendExpand(p,zIn,N); + jsonStringExpandAndAppend(p,zIn,N); }else{ memcpy(p->zBuf+p->nUsed, zIn, N); p->nUsed += N; @@ -202886,7 +203747,7 @@ static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){ */ static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){ va_list ap; - if( (p->nUsed + N >= p->nAlloc) && jsonGrow(p, N) ) return; + if( (p->nUsed + N >= p->nAlloc) && jsonStringGrow(p, N) ) return; va_start(ap, zFormat); sqlite3_vsnprintf(N, p->zBuf+p->nUsed, zFormat, ap); va_end(ap); @@ -202896,7 +203757,7 @@ static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){ /* Append a single character */ static SQLITE_NOINLINE void jsonAppendCharExpand(JsonString *p, char c){ - if( jsonGrow(p,1) ) return; + if( jsonStringGrow(p,1) ) return; p->zBuf[p->nUsed++] = c; } static void jsonAppendChar(JsonString *p, char c){ @@ -202907,24 +203768,27 @@ static void jsonAppendChar(JsonString *p, char c){ } } -/* Try to force the string to be a zero-terminated RCStr string. +/* Remove a single character from the end of the string +*/ +static void jsonStringTrimOneChar(JsonString *p){ + if( p->eErr==0 ){ + assert( p->nUsed>0 ); + p->nUsed--; + } +} + + +/* Make sure there is a zero terminator on p->zBuf[] ** ** Return true on success. Return false if an OOM prevents this ** from happening. 
*/ -static int jsonForceRCStr(JsonString *p){ +static int jsonStringTerminate(JsonString *p){ jsonAppendChar(p, 0); - if( p->bErr ) return 0; - p->nUsed--; - if( p->bStatic==0 ) return 1; - p->nAlloc = 0; - p->nUsed++; - jsonGrow(p, p->nUsed); - p->nUsed--; - return p->bStatic==0; + jsonStringTrimOneChar(p); + return p->eErr==0; } - /* Append a comma separator to the output buffer, if the previous ** character is not '[' or '{'. */ @@ -202937,21 +203801,66 @@ static void jsonAppendSeparator(JsonString *p){ } /* Append the N-byte string in zIn to the end of the JsonString string -** under construction. Enclose the string in "..." and escape -** any double-quotes or backslash characters contained within the +** under construction. Enclose the string in double-quotes ("...") and +** escape any double-quotes or backslash characters contained within the ** string. +** +** This routine is a high-runner. There is a measurable performance +** increase associated with unwinding the jsonIsOk[] loop. */ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){ - u32 i; - if( zIn==0 || ((N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0) ) return; + u32 k; + u8 c; + const u8 *z = (const u8*)zIn; + if( z==0 ) return; + if( (N+p->nUsed+2 >= p->nAlloc) && jsonStringGrow(p,N+2)!=0 ) return; p->zBuf[p->nUsed++] = '"'; - for(i=0; izBuf[p->nUsed++] = c; - }else if( c=='"' || c=='\\' ){ + while( 1 /*exit-by-break*/ ){ + k = 0; + /* The following while() is the 4-way unwound equivalent of + ** + ** while( k=N ){ + while( k=N ){ + if( k>0 ){ + memcpy(&p->zBuf[p->nUsed], z, k); + p->nUsed += k; + } + break; + } + if( k>0 ){ + memcpy(&p->zBuf[p->nUsed], z, k); + p->nUsed += k; + z += k; + N -= k; + } + c = z[0]; + if( c=='"' || c=='\\' ){ json_simple_escape: - if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return; + if( (p->nUsed+N+3 > p->nAlloc) && jsonStringGrow(p,N+3)!=0 ) return; p->zBuf[p->nUsed++] = '\\'; p->zBuf[p->nUsed++] = c; }else if( c=='\'' ){ @@ -202972,7 +203881,7 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){ c = aSpecial[c]; goto json_simple_escape; } - if( (p->nUsed+N+7+i > p->nAlloc) && jsonGrow(p,N+7-i)!=0 ) return; + if( (p->nUsed+N+7 > p->nAlloc) && jsonStringGrow(p,N+7)!=0 ) return; p->zBuf[p->nUsed++] = '\\'; p->zBuf[p->nUsed++] = 'u'; p->zBuf[p->nUsed++] = '0'; @@ -202980,140 +203889,18 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){ p->zBuf[p->nUsed++] = "0123456789abcdef"[c>>4]; p->zBuf[p->nUsed++] = "0123456789abcdef"[c&0xf]; } + z++; + N--; } p->zBuf[p->nUsed++] = '"'; assert( p->nUsednAlloc ); } /* -** The zIn[0..N] string is a JSON5 string literal. Append to p a translation -** of the string literal that standard JSON and that omits all JSON5 -** features. 
-*/ -static void jsonAppendNormalizedString(JsonString *p, const char *zIn, u32 N){ - u32 i; - jsonAppendChar(p, '"'); - zIn++; - N -= 2; - while( N>0 ){ - for(i=0; i0 ){ - jsonAppendRawNZ(p, zIn, i); - zIn += i; - N -= i; - if( N==0 ) break; - } - assert( zIn[0]=='\\' ); - switch( (u8)zIn[1] ){ - case '\'': - jsonAppendChar(p, '\''); - break; - case 'v': - jsonAppendRawNZ(p, "\\u0009", 6); - break; - case 'x': - jsonAppendRawNZ(p, "\\u00", 4); - jsonAppendRawNZ(p, &zIn[2], 2); - zIn += 2; - N -= 2; - break; - case '0': - jsonAppendRawNZ(p, "\\u0000", 6); - break; - case '\r': - if( zIn[2]=='\n' ){ - zIn++; - N--; - } - break; - case '\n': - break; - case 0xe2: - assert( N>=4 ); - assert( 0x80==(u8)zIn[2] ); - assert( 0xa8==(u8)zIn[3] || 0xa9==(u8)zIn[3] ); - zIn += 2; - N -= 2; - break; - default: - jsonAppendRawNZ(p, zIn, 2); - break; - } - zIn += 2; - N -= 2; - } - jsonAppendChar(p, '"'); -} - -/* -** The zIn[0..N] string is a JSON5 integer literal. Append to p a translation -** of the string literal that standard JSON and that omits all JSON5 -** features. -*/ -static void jsonAppendNormalizedInt(JsonString *p, const char *zIn, u32 N){ - if( zIn[0]=='+' ){ - zIn++; - N--; - }else if( zIn[0]=='-' ){ - jsonAppendChar(p, '-'); - zIn++; - N--; - } - if( zIn[0]=='0' && (zIn[1]=='x' || zIn[1]=='X') ){ - sqlite3_int64 i = 0; - int rc = sqlite3DecOrHexToI64(zIn, &i); - if( rc<=1 ){ - jsonPrintf(100,p,"%lld",i); - }else{ - assert( rc==2 ); - jsonAppendRawNZ(p, "9.0e999", 7); - } - return; - } - assert( N>0 ); - jsonAppendRawNZ(p, zIn, N); -} - -/* -** The zIn[0..N] string is a JSON5 real literal. Append to p a translation -** of the string literal that standard JSON and that omits all JSON5 -** features. -*/ -static void jsonAppendNormalizedReal(JsonString *p, const char *zIn, u32 N){ - u32 i; - if( zIn[0]=='+' ){ - zIn++; - N--; - }else if( zIn[0]=='-' ){ - jsonAppendChar(p, '-'); - zIn++; - N--; - } - if( zIn[0]=='.' ){ - jsonAppendChar(p, '0'); - } - for(i=0; i0 ){ - jsonAppendRawNZ(p, zIn, N); - } -} - - - -/* -** Append a function parameter value to the JSON string under -** construction. +** Append an sqlite3_value (such as a function parameter) to the JSON +** string under construction in p. */ -static void jsonAppendValue( +static void jsonAppendSqlValue( JsonString *p, /* Append to this JSON string */ sqlite3_value *pValue /* Value to append */ ){ @@ -203143,290 +203930,127 @@ static void jsonAppendValue( break; } default: { - if( p->bErr==0 ){ + if( jsonFuncArgMightBeBinary(pValue) ){ + JsonParse px; + memset(&px, 0, sizeof(px)); + px.aBlob = (u8*)sqlite3_value_blob(pValue); + px.nBlob = sqlite3_value_bytes(pValue); + jsonTranslateBlobToText(&px, 0, p); + }else if( p->eErr==0 ){ sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1); - p->bErr = 2; - jsonReset(p); + p->eErr = JSTRING_ERR; + jsonStringReset(p); } break; } } } - -/* Make the JSON in p the result of the SQL function. +/* Make the text in p (which is probably a generated JSON text string) +** the result of the SQL function. +** +** The JsonString is reset. ** -** The JSON string is reset. +** If pParse and ctx are both non-NULL, then the SQL string in p is +** loaded into the zJson field of the pParse object as a RCStr and the +** pParse is added to the cache. 
*/ -static void jsonResult(JsonString *p){ - if( p->bErr==0 ){ - if( p->bStatic ){ +static void jsonReturnString( + JsonString *p, /* String to return */ + JsonParse *pParse, /* JSONB source or NULL */ + sqlite3_context *ctx /* Where to cache */ +){ + assert( (pParse!=0)==(ctx!=0) ); + assert( ctx==0 || ctx==p->pCtx ); + if( p->eErr==0 ){ + int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(p->pCtx)); + if( flags & JSON_BLOB ){ + jsonReturnStringAsBlob(p); + }else if( p->bStatic ){ sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed, SQLITE_TRANSIENT, SQLITE_UTF8); - }else if( jsonForceRCStr(p) ){ - sqlite3RCStrRef(p->zBuf); - sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed, + }else if( jsonStringTerminate(p) ){ + if( pParse && pParse->bJsonIsRCStr==0 && pParse->nBlobAlloc>0 ){ + int rc; + pParse->zJson = sqlite3RCStrRef(p->zBuf); + pParse->nJson = p->nUsed; + pParse->bJsonIsRCStr = 1; + rc = jsonCacheInsert(ctx, pParse); + if( rc==SQLITE_NOMEM ){ + sqlite3_result_error_nomem(ctx); + jsonStringReset(p); + return; + } + } + sqlite3_result_text64(p->pCtx, sqlite3RCStrRef(p->zBuf), p->nUsed, sqlite3RCStrUnref, SQLITE_UTF8); + }else{ + sqlite3_result_error_nomem(p->pCtx); } - } - if( p->bErr==1 ){ + }else if( p->eErr & JSTRING_OOM ){ sqlite3_result_error_nomem(p->pCtx); + }else if( p->eErr & JSTRING_MALFORMED ){ + sqlite3_result_error(p->pCtx, "malformed JSON", -1); } - jsonReset(p); + jsonStringReset(p); } /************************************************************************** -** Utility routines for dealing with JsonNode and JsonParse objects +** Utility routines for dealing with JsonParse objects **************************************************************************/ -/* -** Return the number of consecutive JsonNode slots need to represent -** the parsed JSON at pNode. The minimum answer is 1. For ARRAY and -** OBJECT types, the number might be larger. -** -** Appended elements are not counted. The value returned is the number -** by which the JsonNode counter should increment in order to go to the -** next peer value. -*/ -static u32 jsonNodeSize(JsonNode *pNode){ - return pNode->eType>=JSON_ARRAY ? pNode->n+1 : 1; -} - /* ** Reclaim all memory allocated by a JsonParse object. But do not ** delete the JsonParse object itself. */ static void jsonParseReset(JsonParse *pParse){ - while( pParse->pClup ){ - JsonCleanup *pTask = pParse->pClup; - pParse->pClup = pTask->pJCNext; - pTask->xOp(pTask->pArg); - sqlite3_free(pTask); - } assert( pParse->nJPRef<=1 ); - if( pParse->aNode ){ - sqlite3_free(pParse->aNode); - pParse->aNode = 0; - } - pParse->nNode = 0; - pParse->nAlloc = 0; - if( pParse->aUp ){ - sqlite3_free(pParse->aUp); - pParse->aUp = 0; - } if( pParse->bJsonIsRCStr ){ sqlite3RCStrUnref(pParse->zJson); pParse->zJson = 0; + pParse->nJson = 0; pParse->bJsonIsRCStr = 0; } - if( pParse->zAlt ){ - sqlite3RCStrUnref(pParse->zAlt); - pParse->zAlt = 0; + if( pParse->nBlobAlloc ){ + sqlite3DbFree(pParse->db, pParse->aBlob); + pParse->aBlob = 0; + pParse->nBlob = 0; + pParse->nBlobAlloc = 0; } } /* -** Free a JsonParse object that was obtained from sqlite3_malloc(). -** -** Note that destroying JsonParse might call sqlite3RCStrUnref() to -** destroy the zJson value. The RCStr object might recursively invoke -** JsonParse to destroy this pParse object again. Take care to ensure -** that this recursive destructor sequence terminates harmlessly. +** Decrement the reference count on the JsonParse object. When the +** count reaches zero, free the object. 
*/
 static void jsonParseFree(JsonParse *pParse){
-  if( pParse->nJPRef>1 ){
-    pParse->nJPRef--;
-  }else{
-    jsonParseReset(pParse);
-    sqlite3_free(pParse);
-  }
-}
-
-/*
-** Add a cleanup task to the JsonParse object.
-**
-** If an OOM occurs, the cleanup operation happens immediately
-** and this function returns SQLITE_NOMEM.
-*/
-static int jsonParseAddCleanup(
-  JsonParse *pParse,    /* Add the cleanup task to this parser */
-  void(*xOp)(void*),    /* The cleanup task */
-  void *pArg            /* Argument to the cleanup */
-){
-  JsonCleanup *pTask = sqlite3_malloc64( sizeof(*pTask) );
-  if( pTask==0 ){
-    pParse->oom = 1;
-    xOp(pArg);
-    return SQLITE_ERROR;
-  }
-  pTask->pJCNext = pParse->pClup;
-  pParse->pClup = pTask;
-  pTask->xOp = xOp;
-  pTask->pArg = pArg;
-  return SQLITE_OK;
-}
-
-/*
-** Convert the JsonNode pNode into a pure JSON string and
-** append it to pOut.  Substructure is also included.
-*/
-static void jsonRenderNode(
-  JsonParse *pParse,         /* the complete parse of the JSON */
-  JsonNode *pNode,           /* The node to render */
-  JsonString *pOut           /* Write JSON here */
-){
-  assert( pNode!=0 );
-  while( (pNode->jnFlags & JNODE_REPLACE)!=0 && pParse->useMod ){
-    u32 idx = (u32)(pNode - pParse->aNode);
-    u32 i = pParse->iSubst;
-    while( 1 /*exit-by-break*/ ){
-      assert( i<pParse->nNode );
-      assert( pParse->aNode[i].eType==JSON_SUBST );
-      assert( pParse->aNode[i].eU==4 );
-      assert( pParse->aNode[i].u.iPrev<i );
-      if( pParse->aNode[i].n==idx ){
-        pNode = &pParse->aNode[i+1];
-        break;
-      }
-      i = pParse->aNode[i].u.iPrev;
-    }
-  }
-  switch( pNode->eType ){
-    default: {
-      assert( pNode->eType==JSON_NULL );
-      jsonAppendRawNZ(pOut, "null", 4);
-      break;
-    }
-    case JSON_TRUE: {
-      jsonAppendRawNZ(pOut, "true", 4);
-      break;
-    }
-    case JSON_FALSE: {
-      jsonAppendRawNZ(pOut, "false", 5);
-      break;
-    }
-    case JSON_STRING: {
-      assert( pNode->eU==1 );
-      if( pNode->jnFlags & JNODE_RAW ){
-        if( pNode->jnFlags & JNODE_LABEL ){
-          jsonAppendChar(pOut, '"');
-          jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
-          jsonAppendChar(pOut, '"');
-        }else{
-          jsonAppendString(pOut, pNode->u.zJContent, pNode->n);
-        }
-      }else if( pNode->jnFlags & JNODE_JSON5 ){
-        jsonAppendNormalizedString(pOut, pNode->u.zJContent, pNode->n);
-      }else{
-        assert( pNode->n>0 );
-        jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n);
-      }
-      break;
-    }
-    case JSON_REAL: {
-      assert( pNode->eU==1 );
-      if( pNode->jnFlags & JNODE_JSON5 ){
-        jsonAppendNormalizedReal(pOut, pNode->u.zJContent, pNode->n);
-      }else{
-        assert( pNode->n>0 );
-        jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n);
-      }
-      break;
-    }
-    case JSON_INT: {
-      assert( pNode->eU==1 );
-      if( pNode->jnFlags & JNODE_JSON5 ){
-        jsonAppendNormalizedInt(pOut, pNode->u.zJContent, pNode->n);
-      }else{
-        assert( pNode->n>0 );
-        jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n);
-      }
-      break;
-    }
-    case JSON_ARRAY: {
-      u32 j = 1;
-      jsonAppendChar(pOut, '[');
-      for(;;){
-        while( j<=pNode->n ){
-          if( (pNode[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ){
-            jsonAppendSeparator(pOut);
-            jsonRenderNode(pParse, &pNode[j], pOut);
-          }
-          j += jsonNodeSize(&pNode[j]);
-        }
-        if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
-        if( pParse->useMod==0 ) break;
-        assert( pNode->eU==2 );
-        pNode = &pParse->aNode[pNode->u.iAppend];
-        j = 1;
-      }
-      jsonAppendChar(pOut, ']');
-      break;
-    }
-    case JSON_OBJECT: {
-      u32 j = 1;
-      jsonAppendChar(pOut, '{');
-      for(;;){
-        while( j<=pNode->n ){
-          if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ){
-            jsonAppendSeparator(pOut);
-
jsonRenderNode(pParse, &pNode[j], pOut); - jsonAppendChar(pOut, ':'); - jsonRenderNode(pParse, &pNode[j+1], pOut); - } - j += 1 + jsonNodeSize(&pNode[j+1]); - } - if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; - if( pParse->useMod==0 ) break; - assert( pNode->eU==2 ); - pNode = &pParse->aNode[pNode->u.iAppend]; - j = 1; - } - jsonAppendChar(pOut, '}'); - break; + if( pParse ){ + if( pParse->nJPRef>1 ){ + pParse->nJPRef--; + }else{ + jsonParseReset(pParse); + sqlite3DbFree(pParse->db, pParse); } } } -/* -** Return a JsonNode and all its descendants as a JSON string. -*/ -static void jsonReturnJson( - JsonParse *pParse, /* The complete JSON */ - JsonNode *pNode, /* Node to return */ - sqlite3_context *pCtx, /* Return value for this function */ - int bGenerateAlt /* Also store the rendered text in zAlt */ -){ - JsonString s; - if( pParse->oom ){ - sqlite3_result_error_nomem(pCtx); - return; - } - if( pParse->nErr==0 ){ - jsonInit(&s, pCtx); - jsonRenderNode(pParse, pNode, &s); - if( bGenerateAlt && pParse->zAlt==0 && jsonForceRCStr(&s) ){ - pParse->zAlt = sqlite3RCStrRef(s.zBuf); - pParse->nAlt = s.nUsed; - } - jsonResult(&s); - sqlite3_result_subtype(pCtx, JSON_SUBTYPE); - } -} +/************************************************************************** +** Utility routines for the JSON text parser +**************************************************************************/ /* ** Translate a single byte of Hex into an integer. -** This routine only works if h really is a valid hexadecimal -** character: 0..9a..fA..F +** This routine only gives a correct answer if h really is a valid hexadecimal +** character: 0..9a..fA..F. But unlike sqlite3HexToInt(), it does not +** assert() if the digit is not hex. */ static u8 jsonHexToInt(int h){ - assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') ); +#ifdef SQLITE_ASCII + h += 9*(1&(h>>6)); +#endif #ifdef SQLITE_EBCDIC h += 9*(1&~(h>>4)); -#else - h += 9*(1&(h>>6)); #endif return (u8)(h & 0xf); } @@ -203436,10 +204060,6 @@ static u8 jsonHexToInt(int h){ */ static u32 jsonHexToInt4(const char *z){ u32 v; - assert( sqlite3Isxdigit(z[0]) ); - assert( sqlite3Isxdigit(z[1]) ); - assert( sqlite3Isxdigit(z[2]) ); - assert( sqlite3Isxdigit(z[3]) ); v = (jsonHexToInt(z[0])<<12) + (jsonHexToInt(z[1])<<8) + (jsonHexToInt(z[2])<<4) @@ -203447,281 +204067,6 @@ static u32 jsonHexToInt4(const char *z){ return v; } -/* -** Make the JsonNode the return value of the function. -*/ -static void jsonReturn( - JsonParse *pParse, /* Complete JSON parse tree */ - JsonNode *pNode, /* Node to return */ - sqlite3_context *pCtx /* Return value for this function */ -){ - switch( pNode->eType ){ - default: { - assert( pNode->eType==JSON_NULL ); - sqlite3_result_null(pCtx); - break; - } - case JSON_TRUE: { - sqlite3_result_int(pCtx, 1); - break; - } - case JSON_FALSE: { - sqlite3_result_int(pCtx, 0); - break; - } - case JSON_INT: { - sqlite3_int64 i = 0; - int rc; - int bNeg = 0; - const char *z; - - assert( pNode->eU==1 ); - z = pNode->u.zJContent; - if( z[0]=='-' ){ z++; bNeg = 1; } - else if( z[0]=='+' ){ z++; } - rc = sqlite3DecOrHexToI64(z, &i); - if( rc<=1 ){ - sqlite3_result_int64(pCtx, bNeg ? 
-i : i);
-      }else if( rc==3 && bNeg ){
-        sqlite3_result_int64(pCtx, SMALLEST_INT64);
-      }else{
-        goto to_double;
-      }
-      break;
-    }
-    case JSON_REAL: {
-      double r;
-      const char *z;
-      assert( pNode->eU==1 );
-    to_double:
-      z = pNode->u.zJContent;
-      sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
-      sqlite3_result_double(pCtx, r);
-      break;
-    }
-    case JSON_STRING: {
-      if( pNode->jnFlags & JNODE_RAW ){
-        assert( pNode->eU==1 );
-        sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n,
-                            SQLITE_TRANSIENT);
-      }else if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){
-        /* JSON formatted without any backslash-escapes */
-        assert( pNode->eU==1 );
-        sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2,
-                            SQLITE_TRANSIENT);
-      }else{
-        /* Translate JSON formatted string into raw text */
-        u32 i;
-        u32 n = pNode->n;
-        const char *z;
-        char *zOut;
-        u32 j;
-        u32 nOut = n;
-        assert( pNode->eU==1 );
-        z = pNode->u.zJContent;
-        zOut = sqlite3_malloc( nOut+1 );
-        if( zOut==0 ){
-          sqlite3_result_error_nomem(pCtx);
-          break;
-        }
-        for(i=1, j=0; i<n-1; i++){
-          char c = z[i];
-          if( c=='\\' ){
-            c = z[++i];
-            if( c=='u' ){
-              u32 v = jsonHexToInt4(z+i+1);
-              i += 4;
-              if( v==0 ) break;
-              if( v<=0x7f ){
-                zOut[j++] = (char)v;
-              }else if( v<=0x7ff ){
-                zOut[j++] = (char)(0xc0 | (v>>6));
-                zOut[j++] = 0x80 | (v&0x3f);
-              }else{
-                u32 vlo;
-                if( (v&0xfc00)==0xd800
-                 && i<n-6
-                 && z[i+1]=='\\'
-                 && z[i+2]=='u'
-                 && ((vlo = jsonHexToInt4(z+i+3))&0xfc00)==0xdc00
-                ){
-                  /* We have a surrogate pair */
-                  v = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000;
-                  i += 6;
-                  zOut[j++] = 0xf0 | (v>>18);
-                  zOut[j++] = 0x80 | ((v>>12)&0x3f);
-                  zOut[j++] = 0x80 | ((v>>6)&0x3f);
-                  zOut[j++] = 0x80 | (v&0x3f);
-                }else{
-                  zOut[j++] = 0xe0 | (v>>12);
-                  zOut[j++] = 0x80 | ((v>>6)&0x3f);
-                  zOut[j++] = 0x80 | (v&0x3f);
-                }
-              }
-              continue;
-            }else if( c=='b' ){
-              c = '\b';
-            }else if( c=='f' ){
-              c = '\f';
-            }else if( c=='n' ){
-              c = '\n';
-            }else if( c=='r' ){
-              c = '\r';
-            }else if( c=='t' ){
-              c = '\t';
-            }else if( c=='v' ){
-              c = '\v';
-            }else if( c=='\'' || c=='"' || c=='/' || c=='\\' ){
-              /* pass through unchanged */
-            }else if( c=='0' ){
-              c = 0;
-            }else if( c=='x' ){
-              c = (jsonHexToInt(z[i+1])<<4) | jsonHexToInt(z[i+2]);
-              i += 2;
-            }else if( c=='\r' && z[i+1]=='\n' ){
-              i++;
-              continue;
-            }else if( 0xe2==(u8)c ){
-              assert( 0x80==(u8)z[i+1] );
-              assert( 0xa8==(u8)z[i+2] || 0xa9==(u8)z[i+2] );
-              i += 2;
-              continue;
-            }else{
-              continue;
-            }
-          } /* end if( c=='\\' ) */
-          zOut[j++] = c;
-        } /* end for() */
-        zOut[j] = 0;
-        sqlite3_result_text(pCtx, zOut, j, sqlite3_free);
-      }
-      break;
-    }
-    case JSON_ARRAY:
-    case JSON_OBJECT: {
-      jsonReturnJson(pParse, pNode, pCtx, 0);
-      break;
-    }
-  }
-}
-
-/* Forward reference */
-static int jsonParseAddNode(JsonParse*,u32,u32,const char*);
-
-/*
-** A macro to hint to the compiler that a function should not be
-** inlined.
-*/
-#if defined(__GNUC__)
-#  define JSON_NOINLINE  __attribute__((noinline))
-#elif defined(_MSC_VER) && _MSC_VER>=1310
-#  define JSON_NOINLINE  __declspec(noinline)
-#else
-#  define JSON_NOINLINE
-#endif
-
-
-/*
-** Add a single node to pParse->aNode after first expanding the
-** size of the aNode array.  Return the index of the new node.
-**
-** If an OOM error occurs, set pParse->oom and return -1.
-*/
-static JSON_NOINLINE int jsonParseAddNodeExpand(
-  JsonParse *pParse,        /* Append the node to this object */
-  u32 eType,                /* Node type */
-  u32 n,                    /* Content size or sub-node count */
-  const char *zContent      /* Content */
-){
-  u32 nNew;
-  JsonNode *pNew;
-  assert( pParse->nNode>=pParse->nAlloc );
-  if( pParse->oom ) return -1;
-  nNew = pParse->nAlloc*2 + 10;
-  pNew = sqlite3_realloc64(pParse->aNode, sizeof(JsonNode)*nNew);
-  if( pNew==0 ){
-    pParse->oom = 1;
-    return -1;
-  }
-  pParse->nAlloc = sqlite3_msize(pNew)/sizeof(JsonNode);
-  pParse->aNode = pNew;
-  assert( pParse->nNode<pParse->nAlloc );
-  return jsonParseAddNode(pParse, eType, n, zContent);
-}
-
-/*
-** Create a new JsonNode instance based on the arguments and append that
-** instance to the JsonParse.  Return the index in pParse->aNode[] of the
-** new node, or -1 if a memory allocation fails.
-*/
-static int jsonParseAddNode(
-  JsonParse *pParse,        /* Append the node to this object */
-  u32 eType,                /* Node type */
-  u32 n,                    /* Content size or sub-node count */
-  const char *zContent      /* Content */
-){
-  JsonNode *p;
-  assert( pParse->aNode!=0 || pParse->nNode>=pParse->nAlloc );
-  if( pParse->nNode>=pParse->nAlloc ){
-    return jsonParseAddNodeExpand(pParse, eType, n, zContent);
-  }
-  assert( pParse->aNode!=0 );
-  p = &pParse->aNode[pParse->nNode];
-  assert( p!=0 );
-  p->eType = (u8)(eType & 0xff);
-  p->jnFlags = (u8)(eType >> 8);
-  VVA( p->eU = zContent ? 1 : 0 );
-  p->n = n;
-  p->u.zJContent = zContent;
-  return pParse->nNode++;
-}
-
-/*
-** Add an array of new nodes to the current pParse->aNode array.
-** Return the index of the first node added.
-**
-** If an OOM error occurs, set pParse->oom.
-*/
-static void jsonParseAddNodeArray(
-  JsonParse *pParse,        /* Append the node to this object */
-  JsonNode *aNode,          /* Array of nodes to add */
-  u32 nNode                 /* Number of elements in aNew */
-){
-  assert( aNode!=0 );
-  assert( nNode>=1 );
-  if( pParse->nNode + nNode > pParse->nAlloc ){
-    u32 nNew = pParse->nNode + nNode;
-    JsonNode *aNew = sqlite3_realloc64(pParse->aNode, nNew*sizeof(JsonNode));
-    if( aNew==0 ){
-      pParse->oom = 1;
-      return;
-    }
-    pParse->nAlloc = sqlite3_msize(aNew)/sizeof(JsonNode);
-    pParse->aNode = aNew;
-  }
-  memcpy(&pParse->aNode[pParse->nNode], aNode, nNode*sizeof(JsonNode));
-  pParse->nNode += nNode;
-}
-
-/*
-** Add a new JSON_SUBST node.  The node immediately following
-** this new node will be the substitute content for iNode.
-*/
-static int jsonParseAddSubstNode(
-  JsonParse *pParse,       /* Add the JSON_SUBST here */
-  u32 iNode                /* References this node */
-){
-  int idx = jsonParseAddNode(pParse, JSON_SUBST, iNode, 0);
-  if( pParse->oom ) return -1;
-  pParse->aNode[iNode].jnFlags |= JNODE_REPLACE;
-  pParse->aNode[idx].eU = 4;
-  pParse->aNode[idx].u.iPrev = pParse->iSubst;
-  pParse->iSubst = idx;
-  pParse->hasMod = 1;
-  pParse->useMod = 1;
-  return idx;
-}
-
 /*
 ** Return true if z[] begins with 2 (or more) hexadecimal digits
 */
@@ -203875,63 +204220,500 @@ static const struct NanInfName {
   char *zMatch;
   char *zRepl;
 } aNanInfName[] = {
-  { 'i', 'I', 3, JSON_REAL, 7, "inf", "9.0e999" },
-  { 'i', 'I', 8, JSON_REAL, 7, "infinity", "9.0e999" },
-  { 'n', 'N', 3, JSON_NULL, 4, "NaN", "null" },
-  { 'q', 'Q', 4, JSON_NULL, 4, "QNaN", "null" },
-  { 's', 'S', 4, JSON_NULL, 4, "SNaN", "null" },
+  { 'i', 'I', 3, JSONB_FLOAT, 7, "inf", "9.0e999" },
+  { 'i', 'I', 8, JSONB_FLOAT, 7, "infinity", "9.0e999" },
+  { 'n', 'N', 3, JSONB_NULL, 4, "NaN", "null" },
+  { 'q', 'Q', 4, JSONB_NULL, 4, "QNaN", "null" },
+  { 's', 'S', 4, JSONB_NULL, 4, "SNaN", "null" },
 };
 
+
 /*
-** Parse a single JSON value which begins at pParse->zJson[i].  Return the
-** index of the first character past the end of the value parsed.
+** Report the wrong number of arguments for json_insert(), json_replace()
+** or json_set().
+*/
+static void jsonWrongNumArgs(
+  sqlite3_context *pCtx,
+  const char *zFuncName
+){
+  char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
+                               zFuncName);
+  sqlite3_result_error(pCtx, zMsg, -1);
+  sqlite3_free(zMsg);
+}
+
+/****************************************************************************
+** Utility routines for dealing with the binary BLOB representation of JSON
+****************************************************************************/
+
+/*
+** Expand pParse->aBlob so that it holds at least N bytes.
 **
-** Special return values:
+** Return the number of errors.
+*/
+static int jsonBlobExpand(JsonParse *pParse, u32 N){
+  u8 *aNew;
+  u32 t;
+  assert( N>pParse->nBlobAlloc );
+  if( pParse->nBlobAlloc==0 ){
+    t = 100;
+  }else{
+    t = pParse->nBlobAlloc*2;
+  }
+  if( t<N ) t = N+100;
+  aNew = sqlite3DbRealloc(pParse->db, pParse->aBlob, t);
+  if( aNew==0 ){ pParse->oom = 1; return 1; }
+  pParse->aBlob = aNew;
+  pParse->nBlobAlloc = t;
+  return 0;
+}
+
+/*
+** If pParse->aBlob is not previously editable (because it is taken
+** from sqlite3_value_blob(), as indicated by the fact that
+** pParse->nBlobAlloc==0 and pParse->nBlob>0) then make it editable
+** by making a copy into space obtained from malloc.
+**
+** Return true on success.  Return false on OOM.
+*/
+static int jsonBlobMakeEditable(JsonParse *pParse, u32 nExtra){
+  u8 *aOld;
+  u32 nSize;
+  assert( !pParse->bReadOnly );
+  if( pParse->oom ) return 0;
+  if( pParse->nBlobAlloc>0 ) return 1;
+  aOld = pParse->aBlob;
+  nSize = pParse->nBlob + nExtra;
+  pParse->aBlob = 0;
+  if( jsonBlobExpand(pParse, nSize) ){
+    return 0;
+  }
+  assert( pParse->nBlobAlloc >= pParse->nBlob + nExtra );
+  memcpy(pParse->aBlob, aOld, pParse->nBlob);
+  return 1;
+}
+
+/* Expand pParse->aBlob and append one byte.
+*/
+static SQLITE_NOINLINE void jsonBlobExpandAndAppendOneByte(
+  JsonParse *pParse,
+  u8 c
+){
+  jsonBlobExpand(pParse, pParse->nBlob+1);
+  if( pParse->oom==0 ){
+    assert( pParse->nBlob+1<=pParse->nBlobAlloc );
+    pParse->aBlob[pParse->nBlob++] = c;
+  }
+}
+
+/* Append a single character.
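+**
+** The hot path is kept inline: when the buffer already has room the byte
+** is simply stored, and only the rare must-grow case calls out to the
+** SQLITE_NOINLINE helper above.  A sketch of the pattern (names as
+** defined in this file):
+**
+**     if( pParse->nBlob >= pParse->nBlobAlloc ){
+**       jsonBlobExpandAndAppendOneByte(pParse, c);   /* rare */
+**     }else{
+**       pParse->aBlob[pParse->nBlob++] = c;          /* common */
+**     }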
+*/ +static void jsonBlobAppendOneByte(JsonParse *pParse, u8 c){ + if( pParse->nBlob >= pParse->nBlobAlloc ){ + jsonBlobExpandAndAppendOneByte(pParse, c); + }else{ + pParse->aBlob[pParse->nBlob++] = c; + } +} + +/* Slow version of jsonBlobAppendNode() that first resizes the +** pParse->aBlob structure. +*/ +static void jsonBlobAppendNode(JsonParse*,u8,u32,const void*); +static SQLITE_NOINLINE void jsonBlobExpandAndAppendNode( + JsonParse *pParse, + u8 eType, + u32 szPayload, + const void *aPayload +){ + if( jsonBlobExpand(pParse, pParse->nBlob+szPayload+9) ) return; + jsonBlobAppendNode(pParse, eType, szPayload, aPayload); +} + + +/* Append an node type byte together with the payload size and +** possibly also the payload. +** +** If aPayload is not NULL, then it is a pointer to the payload which +** is also appended. If aPayload is NULL, the pParse->aBlob[] array +** is resized (if necessary) so that it is big enough to hold the +** payload, but the payload is not appended and pParse->nBlob is left +** pointing to where the first byte of payload will eventually be. +*/ +static void jsonBlobAppendNode( + JsonParse *pParse, /* The JsonParse object under construction */ + u8 eType, /* Node type. One of JSONB_* */ + u32 szPayload, /* Number of bytes of payload */ + const void *aPayload /* The payload. Might be NULL */ +){ + u8 *a; + if( pParse->nBlob+szPayload+9 > pParse->nBlobAlloc ){ + jsonBlobExpandAndAppendNode(pParse,eType,szPayload,aPayload); + return; + } + assert( pParse->aBlob!=0 ); + a = &pParse->aBlob[pParse->nBlob]; + if( szPayload<=11 ){ + a[0] = eType | (szPayload<<4); + pParse->nBlob += 1; + }else if( szPayload<=0xff ){ + a[0] = eType | 0xc0; + a[1] = szPayload & 0xff; + pParse->nBlob += 2; + }else if( szPayload<=0xffff ){ + a[0] = eType | 0xd0; + a[1] = (szPayload >> 8) & 0xff; + a[2] = szPayload & 0xff; + pParse->nBlob += 3; + }else{ + a[0] = eType | 0xe0; + a[1] = (szPayload >> 24) & 0xff; + a[2] = (szPayload >> 16) & 0xff; + a[3] = (szPayload >> 8) & 0xff; + a[4] = szPayload & 0xff; + pParse->nBlob += 5; + } + if( aPayload ){ + pParse->nBlob += szPayload; + memcpy(&pParse->aBlob[pParse->nBlob-szPayload], aPayload, szPayload); + } +} + +/* Change the payload size for the node at index i to be szPayload. +*/ +static int jsonBlobChangePayloadSize( + JsonParse *pParse, + u32 i, + u32 szPayload +){ + u8 *a; + u8 szType; + u8 nExtra; + u8 nNeeded; + int delta; + if( pParse->oom ) return 0; + a = &pParse->aBlob[i]; + szType = a[0]>>4; + if( szType<=11 ){ + nExtra = 0; + }else if( szType==12 ){ + nExtra = 1; + }else if( szType==13 ){ + nExtra = 2; + }else{ + nExtra = 4; + } + if( szPayload<=11 ){ + nNeeded = 0; + }else if( szPayload<=0xff ){ + nNeeded = 1; + }else if( szPayload<=0xffff ){ + nNeeded = 2; + }else{ + nNeeded = 4; + } + delta = nNeeded - nExtra; + if( delta ){ + u32 newSize = pParse->nBlob + delta; + if( delta>0 ){ + if( newSize>pParse->nBlobAlloc && jsonBlobExpand(pParse, newSize) ){ + return 0; /* OOM error. Error state recorded in pParse->oom. 
 */
+      a = &pParse->aBlob[i];
+      memmove(&a[1+delta], &a[1], pParse->nBlob - (i+1));
+    }else{
+      memmove(&a[1], &a[1-delta], pParse->nBlob - (i+1-delta));
+    }
+    pParse->nBlob = newSize;
+  }
+  if( nNeeded==0 ){
+    a[0] = (a[0] & 0x0f) | (szPayload<<4);
+  }else if( nNeeded==1 ){
+    a[0] = (a[0] & 0x0f) | 0xc0;
+    a[1] = szPayload & 0xff;
+  }else if( nNeeded==2 ){
+    a[0] = (a[0] & 0x0f) | 0xd0;
+    a[1] = (szPayload >> 8) & 0xff;
+    a[2] = szPayload & 0xff;
+  }else{
+    a[0] = (a[0] & 0x0f) | 0xe0;
+    a[1] = (szPayload >> 24) & 0xff;
+    a[2] = (szPayload >> 16) & 0xff;
+    a[3] = (szPayload >> 8) & 0xff;
+    a[4] = szPayload & 0xff;
+  }
+  return delta;
+}
+
+/*
+** If z[0] is 'u' and is followed by exactly 4 hexadecimal characters,
+** then set *pOp to JSONB_TEXTJ and return true.  If not, do not make
+** any changes to *pOp and return false.
+*/
+static int jsonIs4HexB(const char *z, int *pOp){
+  if( z[0]!='u' ) return 0;
+  if( !jsonIs4Hex(&z[1]) ) return 0;
+  *pOp = JSONB_TEXTJ;
+  return 1;
+}
+
+/*
+** Check a single element of the JSONB in pParse for validity.
+**
+** The element to be checked starts at offset i and must end on the
+** last byte before iEnd.
+**
+** Return 0 if everything is correct.  Return the 1-based byte offset of the
+** error if a problem is detected.  (In other words, if the error is at offset
+** 0, return 1).
+*/
+static u32 jsonbValidityCheck(
+  const JsonParse *pParse,  /* Input JSONB.  Only aBlob and nBlob are used */
+  u32 i,                    /* Start of element as pParse->aBlob[i] */
+  u32 iEnd,                 /* One more than the last byte of the element */
+  u32 iDepth                /* Current nesting depth */
+){
+  u32 n, sz, j, k;
+  const u8 *z;
+  u8 x;
+  if( iDepth>JSON_MAX_DEPTH ) return i+1;
+  sz = 0;
+  n = jsonbPayloadSize(pParse, i, &sz);
+  if( NEVER(n==0) ) return i+1;          /* Checked by caller */
+  if( NEVER(i+n+sz!=iEnd) ) return i+1;  /* Checked by caller */
+  z = pParse->aBlob;
+  x = z[i] & 0x0f;
+  switch( x ){
+    case JSONB_NULL:
+    case JSONB_TRUE:
+    case JSONB_FALSE: {
+      return n+sz==1 ? 0 : i+1;
+    }
+    case JSONB_INT: {
+      if( sz<1 ) return i+1;
+      j = i+n;
+      if( z[j]=='-' ){
+        j++;
+        if( sz<2 ) return i+1;
+      }
+      k = i+n+sz;
+      while( j<k ){
+        if( !sqlite3Isdigit(z[j]) ) return j+1;
+        j++;
+      }
+      return 0;
+    }
+    case JSONB_INT5: {
+      if( sz<3 ) return i+1;
+      j = i+n;
+      if( z[j]=='-' ){
+        if( sz<4 ) return i+1;
+        j++;
+      }
+      if( z[j]!='0' ) return j+1;
+      if( z[j+1]!='x' && z[j+1]!='X' ) return j+1;
+      j += 2;
+      k = i+n+sz;
+      while( j<k ){
+        if( !sqlite3Isxdigit(z[j]) ) return j+1;
+        j++;
+      }
+      return 0;
+    }
+    case JSONB_FLOAT:
+    case JSONB_FLOAT5: {
+      u8 seen = 0;    /* 0: initial.  1: '.' seen.  2: 'e' seen. */
+      if( sz<2 ) return i+1;
+      j = i+n;
+      k = j+sz;
+      if( z[j]=='-' ){
+        j++;
+        if( sz<3 ) return i+1;
+      }
+      if( z[j]=='.' ){
+        if( x==JSONB_FLOAT ) return j+1;
+        if( !sqlite3Isdigit(z[j+1]) ) return j+1;
+        j += 2;
+      }else if( z[j]=='0' && x==JSONB_FLOAT ){
+        if( j+3>k ) return j+1;
+        if( z[j+1]!='.' && z[j+1]!='e' && z[j+1]!='E' ) return j+1;
+        j++;
+      }
+      for(; j<k; j++){
+        if( sqlite3Isdigit(z[j]) ) continue;
+        if( z[j]=='.' ){
+          if( seen>0 ) return j+1;
+          if( x==JSONB_FLOAT && (j==k-1 || !sqlite3Isdigit(z[j+1])) ){
+            return j+1;
+          }
+          seen = 1;
+          continue;
+        }
+        if( z[j]=='e' || z[j]=='E' ){
+          if( seen==2 ) return j+1;
+          if( j==k-1 ) return j+1;
+          if( z[j+1]=='+' || z[j+1]=='-' ){
+            j++;
+            if( j==k-1 ) return j+1;
+          }
+          seen = 2;
+          continue;
+        }
+        return j+1;
+      }
+      if( seen==0 ) return i+1;
+      return 0;
+    }
+    case JSONB_TEXT: {
+      j = i+n;
+      k = j+sz;
+      while( j<k ){
+        if( !jsonIsOk[z[j]] && z[j]!='\'' ) return j+1;
+        j++;
+      }
+      return 0;
+    }
+    case JSONB_TEXTJ:
+    case JSONB_TEXT5: {
+      j = i+n;
+      k = j+sz;
+      while( j<k ){
+        if( !jsonIsOk[z[j]] && z[j]!='\'' ){
+          if( z[j]=='"' ){
+            if( x==JSONB_TEXTJ ) return j+1;
+          }else if( z[j]!='\\' || j+1>=k ){
+            return j+1;
+          }else if( strchr("\"\\/bfnrt",z[j+1])!=0 ){
+            j++;
+          }else if( z[j+1]=='u' ){
+            if( j+5>=k ) return j+1;
+            if( !jsonIs4Hex((const char*)&z[j+2]) ) return j+1;
+            j++;
+          }else if( x!=JSONB_TEXT5 ){
+            return j+1;
+          }else{
+            u32 c = 0;
+            u32 szC = jsonUnescapeOneChar((const char*)&z[j], k-j, &c);
+            if( c==JSON_INVALID_CHAR ) return j+1;
+            j += szC - 1;
+          }
+        }
+        j++;
+      }
+      return 0;
+    }
+    case JSONB_TEXTRAW: {
+      return 0;
+    }
+    case JSONB_ARRAY: {
+      u32 sub;
+      j = i+n;
+      k = j+sz;
+      while( j<k ){
+        sz = 0;
+        n = jsonbPayloadSize(pParse, j, &sz);
+        if( n==0 ) return j+1;
+        if( j+n+sz>k ) return j+1;
+        sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1);
+        if( sub ) return sub;
+        j += n + sz;
+      }
+      assert( j==k );
+      return 0;
+    }
+    case JSONB_OBJECT: {
+      u32 cnt = 0;
+      u32 sub;
+      j = i+n;
+      k = j+sz;
+      while( j<k ){
+        sz = 0;
+        n = jsonbPayloadSize(pParse, j, &sz);
+        if( n==0 ) return j+1;
+        if( j+n+sz>k ) return j+1;
+        if( (cnt & 1)==0 ){
+          x = z[j] & 0x0f;
+          if( x<JSONB_TEXT || x>JSONB_TEXTRAW ) return j+1;
+        }
+        sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1);
+        if( sub ) return sub;
+        cnt++;
+        j += n + sz;
+      }
+      assert( j==k );
+      if( (cnt & 1)!=0 ) return j+1;
+      return 0;
+    }
+    default: {
+      return i+1;
+    }
+  }
+}
+
+/*
+** Translate a single element of JSON text at pParse->zJson[i] into
+** its equivalent binary JSONB representation.  Append the translation into
+** pParse->aBlob[] beginning at pParse->nBlob.  The size of
+** pParse->aBlob[] is increased as necessary.
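+**
+** As an illustrative sketch of the encoding (using the JSONB_* type codes
+** and the header format implemented by jsonBlobAppendNode() above): the
+** JSON text '[1,"a"]' translates into the five-byte blob
+**
+**     0x4b  0x13 0x31  0x17 0x61
+**
+** where 0x4b is a JSONB_ARRAY header with a 4-byte payload, 0x13 0x31
+** is a JSONB_INT holding the text "1", and 0x17 0x61 is a JSONB_TEXT
+** holding "a".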
+**
+** Return the index of the first character past the end of the element parsed,
+** or one of the following special result codes:
 **
 **      0    End of input
-**     -1    Syntax error
-**     -2    '}' seen
-**     -3    ']' seen
-**     -4    ',' seen
-**     -5    ':' seen
+**     -1    Syntax error or OOM
+**     -2    '}' seen   \
+**     -3    ']' seen    \___  For these returns, pParse->iErr is set to
+**     -4    ',' seen    /     the index in zJson[] of the seen character
+**     -5    ':' seen   /
 */
-static int jsonParseValue(JsonParse *pParse, u32 i){
+static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){
   char c;
   u32 j;
-  int iThis;
+  u32 iThis, iStart;
   int x;
-  JsonNode *pNode;
+  u8 t;
   const char *z = pParse->zJson;
 json_parse_restart:
   switch( (u8)z[i] ){
   case '{': {
     /* Parse object */
-    iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
-    if( iThis<0 ) return -1;
+    iThis = pParse->nBlob;
+    jsonBlobAppendNode(pParse, JSONB_OBJECT, pParse->nJson-i, 0);
     if( ++pParse->iDepth > JSON_MAX_DEPTH ){
       pParse->iErr = i;
       return -1;
     }
+    iStart = pParse->nBlob;
     for(j=i+1;;j++){
-      u32 nNode = pParse->nNode;
-      x = jsonParseValue(pParse, j);
+      u32 iBlob = pParse->nBlob;
+      x = jsonTranslateTextToBlob(pParse, j);
       if( x<=0 ){
+        int op;
         if( x==(-2) ){
           j = pParse->iErr;
-          if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1;
+          if( pParse->nBlob!=(u32)iStart ) pParse->hasNonstd = 1;
           break;
         }
        j += json5Whitespace(&z[j]);
+        op = JSONB_TEXT;
        if( sqlite3JsonId1(z[j])
-         || (z[j]=='\\' && z[j+1]=='u' && jsonIs4Hex(&z[j+2]))
+         || (z[j]=='\\' && jsonIs4HexB(&z[j+1], &op))
        ){
          int k = j+1;
          while( (sqlite3JsonId2(z[k]) && json5Whitespace(&z[k])==0)
-             || (z[k]=='\\' && z[k+1]=='u' && jsonIs4Hex(&z[k+2]))
+             || (z[k]=='\\' && jsonIs4HexB(&z[k+1], &op))
          ){
            k++;
          }
-          jsonParseAddNode(pParse, JSON_STRING | (JNODE_RAW<<8), k-j, &z[j]);
+          assert( iBlob==pParse->nBlob );
+          jsonBlobAppendNode(pParse, op, k-j, &z[j]);
          pParse->hasNonstd = 1;
          x = k;
        }else{
@@ -203940,24 +204722,24 @@ static int jsonParseValue(JsonParse *pParse, u32 i){
        }
      }
      if( pParse->oom ) return -1;
-      pNode = &pParse->aNode[nNode];
-      if( pNode->eType!=JSON_STRING ){
+      t = pParse->aBlob[iBlob] & 0x0f;
+      if( t<JSONB_TEXT || t>JSONB_TEXTRAW ){
        pParse->iErr = j;
        return -1;
      }
-      pNode->jnFlags |= JNODE_LABEL;
      j = x;
      if( z[j]==':' ){
        j++;
      }else{
-        if( fast_isspace(z[j]) ){
-          do{ j++; }while( fast_isspace(z[j]) );
+        if( jsonIsspace(z[j]) ){
+          /* strspn() is not helpful here */
+          do{ j++; }while( jsonIsspace(z[j]) );
          if( z[j]==':' ){
            j++;
            goto parse_object_value;
          }
        }
-        x = jsonParseValue(pParse, j);
+        x = jsonTranslateTextToBlob(pParse, j);
        if( x!=(-5) ){
          if( x!=(-1) ) pParse->iErr = j;
          return -1;
@@ -203965,7 +204747,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){
        j = pParse->iErr+1;
      }
      parse_object_value:
-      x = jsonParseValue(pParse, j);
+      x = jsonTranslateTextToBlob(pParse, j);
      if( x<=0 ){
        if( x!=(-1) ) pParse->iErr = j;
        return -1;
@@ -203976,15 +204758,15 @@ static int jsonParseValue(JsonParse *pParse, u32 i){
      }else if( z[j]=='}' ){
        break;
      }else{
-        if( fast_isspace(z[j]) ){
-          do{ j++; }while( fast_isspace(z[j]) );
+        if( jsonIsspace(z[j]) ){
+          j += 1 + (u32)strspn(&z[j+1], jsonSpaces);
          if( z[j]==',' ){
            continue;
          }else if( z[j]=='}' ){
            break;
          }
        }
-        x = jsonParseValue(pParse, j);
+        x = jsonTranslateTextToBlob(pParse, j);
        if( x==(-4) ){
          j = pParse->iErr;
          continue;
@@ -203997,25 +204779,26 @@ static int jsonParseValue(JsonParse *pParse, u32 i){
      pParse->iErr = j;
      return -1;
    }
-    pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+    jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart);
    pParse->iDepth--;
    return j+1;
  }
  case '[': {
    /*
Parse array */ - iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0); - if( iThis<0 ) return -1; + iThis = pParse->nBlob; + jsonBlobAppendNode(pParse, JSONB_ARRAY, pParse->nJson - i, 0); + iStart = pParse->nBlob; + if( pParse->oom ) return -1; if( ++pParse->iDepth > JSON_MAX_DEPTH ){ pParse->iErr = i; return -1; } - memset(&pParse->aNode[iThis].u, 0, sizeof(pParse->aNode[iThis].u)); for(j=i+1;;j++){ - x = jsonParseValue(pParse, j); + x = jsonTranslateTextToBlob(pParse, j); if( x<=0 ){ if( x==(-3) ){ j = pParse->iErr; - if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1; + if( pParse->nBlob!=iStart ) pParse->hasNonstd = 1; break; } if( x!=(-1) ) pParse->iErr = j; @@ -204027,15 +204810,15 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ }else if( z[j]==']' ){ break; }else{ - if( fast_isspace(z[j]) ){ - do{ j++; }while( fast_isspace(z[j]) ); + if( jsonIsspace(z[j]) ){ + j += 1 + (u32)strspn(&z[j+1], jsonSpaces); if( z[j]==',' ){ continue; }else if( z[j]==']' ){ break; } } - x = jsonParseValue(pParse, j); + x = jsonTranslateTextToBlob(pParse, j); if( x==(-4) ){ j = pParse->iErr; continue; @@ -204048,23 +204831,33 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ pParse->iErr = j; return -1; } - pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1; + jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart); pParse->iDepth--; return j+1; } case '\'': { - u8 jnFlags; + u8 opcode; char cDelim; pParse->hasNonstd = 1; - jnFlags = JNODE_JSON5; + opcode = JSONB_TEXT; goto parse_string; case '"': /* Parse string */ - jnFlags = 0; + opcode = JSONB_TEXT; parse_string: cDelim = z[i]; - for(j=i+1; 1; j++){ - if( jsonIsOk[(unsigned char)z[j]] ) continue; + j = i+1; + while( 1 /*exit-by-break*/ ){ + if( jsonIsOk[(u8)z[j]] ){ + if( !jsonIsOk[(u8)z[j+1]] ){ + j += 1; + }else if( !jsonIsOk[(u8)z[j+2]] ){ + j += 2; + }else{ + j += 3; + continue; + } + } c = z[j]; if( c==cDelim ){ break; @@ -204073,16 +204866,16 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f' || c=='n' || c=='r' || c=='t' || (c=='u' && jsonIs4Hex(&z[j+1])) ){ - jnFlags |= JNODE_ESCAPE; + if( opcode==JSONB_TEXT ) opcode = JSONB_TEXTJ; }else if( c=='\'' || c=='0' || c=='v' || c=='\n' || (0xe2==(u8)c && 0x80==(u8)z[j+1] && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2])) || (c=='x' && jsonIs2Hex(&z[j+1])) ){ - jnFlags |= (JNODE_ESCAPE|JNODE_JSON5); + opcode = JSONB_TEXT5; pParse->hasNonstd = 1; }else if( c=='\r' ){ if( z[j+1]=='\n' ) j++; - jnFlags |= (JNODE_ESCAPE|JNODE_JSON5); + opcode = JSONB_TEXT5; pParse->hasNonstd = 1; }else{ pParse->iErr = j; @@ -204092,14 +204885,17 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ /* Control characters are not allowed in strings */ pParse->iErr = j; return -1; + }else if( c=='"' ){ + opcode = JSONB_TEXT5; } + j++; } - jsonParseAddNode(pParse, JSON_STRING | (jnFlags<<8), j+1-i, &z[i]); + jsonBlobAppendNode(pParse, opcode, j-1-i, &z[i+1]); return j+1; } case 't': { if( strncmp(z+i,"true",4)==0 && !sqlite3Isalnum(z[i+4]) ){ - jsonParseAddNode(pParse, JSON_TRUE, 0, 0); + jsonBlobAppendOneByte(pParse, JSONB_TRUE); return i+4; } pParse->iErr = i; @@ -204107,23 +204903,22 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ } case 'f': { if( strncmp(z+i,"false",5)==0 && !sqlite3Isalnum(z[i+5]) ){ - jsonParseAddNode(pParse, JSON_FALSE, 0, 0); + jsonBlobAppendOneByte(pParse, JSONB_FALSE); return i+5; } pParse->iErr = i; return -1; } case '+': { - u8 seenDP, seenE, jnFlags; + u8 seenE; pParse->hasNonstd = 1; - jnFlags = 
JNODE_JSON5; + t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */ goto parse_number; case '.': if( sqlite3Isdigit(z[i+1]) ){ pParse->hasNonstd = 1; - jnFlags = JNODE_JSON5; + t = 0x03; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */ seenE = 0; - seenDP = JSON_REAL; goto parse_number_2; } pParse->iErr = i; @@ -204140,9 +204935,8 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ case '8': case '9': /* Parse number */ - jnFlags = 0; + t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */ parse_number: - seenDP = JSON_INT; seenE = 0; assert( '-' < '0' ); assert( '+' < '0' ); @@ -204152,9 +204946,9 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ if( c<='0' ){ if( c=='0' ){ if( (z[i+1]=='x' || z[i+1]=='X') && sqlite3Isxdigit(z[i+2]) ){ - assert( seenDP==JSON_INT ); + assert( t==0x00 ); pParse->hasNonstd = 1; - jnFlags |= JNODE_JSON5; + t = 0x01; for(j=i+3; sqlite3Isxdigit(z[j]); j++){} goto parse_number_finish; }else if( sqlite3Isdigit(z[i+1]) ){ @@ -204171,15 +204965,15 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ ){ pParse->hasNonstd = 1; if( z[i]=='-' ){ - jsonParseAddNode(pParse, JSON_REAL, 8, "-9.0e999"); + jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999"); }else{ - jsonParseAddNode(pParse, JSON_REAL, 7, "9.0e999"); + jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999"); } return i + (sqlite3StrNICmp(&z[i+4],"inity",5)==0 ? 9 : 4); } if( z[i+1]=='.' ){ pParse->hasNonstd = 1; - jnFlags |= JNODE_JSON5; + t |= 0x01; goto parse_number_2; } pParse->iErr = i; @@ -204191,30 +204985,31 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ return -1; }else if( (z[i+2]=='x' || z[i+2]=='X') && sqlite3Isxdigit(z[i+3]) ){ pParse->hasNonstd = 1; - jnFlags |= JNODE_JSON5; + t |= 0x01; for(j=i+4; sqlite3Isxdigit(z[j]); j++){} goto parse_number_finish; } } } } + parse_number_2: for(j=i+1;; j++){ c = z[j]; if( sqlite3Isdigit(c) ) continue; if( c=='.' 
){ - if( seenDP==JSON_REAL ){ + if( (t & 0x02)!=0 ){ pParse->iErr = j; return -1; } - seenDP = JSON_REAL; + t |= 0x02; continue; } if( c=='e' || c=='E' ){ if( z[j-1]<'0' ){ if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){ pParse->hasNonstd = 1; - jnFlags |= JNODE_JSON5; + t |= 0x01; }else{ pParse->iErr = j; return -1; @@ -204224,7 +205019,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ pParse->iErr = j; return -1; } - seenDP = JSON_REAL; + t |= 0x02; seenE = 1; c = z[j+1]; if( c=='+' || c=='-' ){ @@ -204242,14 +205037,18 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ if( z[j-1]<'0' ){ if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){ pParse->hasNonstd = 1; - jnFlags |= JNODE_JSON5; + t |= 0x01; }else{ pParse->iErr = j; return -1; } } parse_number_finish: - jsonParseAddNode(pParse, seenDP | (jnFlags<<8), j - i, &z[i]); + assert( JSONB_INT+0x01==JSONB_INT5 ); + assert( JSONB_FLOAT+0x01==JSONB_FLOAT5 ); + assert( JSONB_INT+0x02==JSONB_FLOAT ); + if( z[i]=='+' ) i++; + jsonBlobAppendNode(pParse, JSONB_INT+t, j-i, &z[i]); return j; } case '}': { @@ -204275,9 +205074,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ case 0x0a: case 0x0d: case 0x20: { - do{ - i++; - }while( fast_isspace(z[i]) ); + i += 1 + (u32)strspn(&z[i+1], jsonSpaces); goto json_parse_restart; } case 0x0b: @@ -204299,7 +205096,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ } case 'n': { if( strncmp(z+i,"null",4)==0 && !sqlite3Isalnum(z[i+4]) ){ - jsonParseAddNode(pParse, JSON_NULL, 0, 0); + jsonBlobAppendOneByte(pParse, JSONB_NULL); return i+4; } /* fall-through into the default case that checks for NaN */ @@ -204315,8 +205112,11 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ continue; } if( sqlite3Isalnum(z[i+nn]) ) continue; - jsonParseAddNode(pParse, aNanInfName[k].eType, - aNanInfName[k].nRepl, aNanInfName[k].zRepl); + if( aNanInfName[k].eType==JSONB_FLOAT ){ + jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999"); + }else{ + jsonBlobAppendOneByte(pParse, JSONB_NULL); + } pParse->hasNonstd = 1; return i + nn; } @@ -204326,6 +205126,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ } /* End switch(z[i]) */ } + /* ** Parse a complete JSON string. Return 0 on success or non-zero if there ** are any errors. If an error occurs, free all memory held by pParse, @@ -204334,20 +205135,26 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ ** pParse must be initialized to an empty parse object prior to calling ** this routine. */ -static int jsonParse( +static int jsonConvertTextToBlob( JsonParse *pParse, /* Initialize and fill this JsonParse object */ sqlite3_context *pCtx /* Report errors here */ ){ int i; const char *zJson = pParse->zJson; - i = jsonParseValue(pParse, 0); + i = jsonTranslateTextToBlob(pParse, 0); if( pParse->oom ) i = -1; if( i>0 ){ +#ifdef SQLITE_DEBUG assert( pParse->iDepth==0 ); - while( fast_isspace(zJson[i]) ) i++; + if( sqlite3Config.bJsonSelfcheck ){ + assert( jsonbValidityCheck(pParse, 0, pParse->nBlob, 0)==0 ); + } +#endif + while( jsonIsspace(zJson[i]) ) i++; if( zJson[i] ){ i += json5Whitespace(&zJson[i]); if( zJson[i] ){ + if( pCtx ) sqlite3_result_error(pCtx, "malformed JSON", -1); jsonParseReset(pParse); return 1; } @@ -204368,248 +205175,715 @@ static int jsonParse( return 0; } +/* +** The input string pStr is a well-formed JSON text string. Convert +** this into the JSONB format and make it the return value of the +** SQL function. 
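+**
+** This is the path taken, for example (illustrative), by:
+**
+**     SELECT jsonb('{"a":1}');
+**
+** The result blob is handed to SQLite with SQLITE_DYNAMIC, so ownership
+** of the px.aBlob allocation transfers to the core without a copy.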
+*/ +static void jsonReturnStringAsBlob(JsonString *pStr){ + JsonParse px; + memset(&px, 0, sizeof(px)); + jsonStringTerminate(pStr); + px.zJson = pStr->zBuf; + px.nJson = pStr->nUsed; + px.db = sqlite3_context_db_handle(pStr->pCtx); + (void)jsonTranslateTextToBlob(&px, 0); + if( px.oom ){ + sqlite3DbFree(px.db, px.aBlob); + sqlite3_result_error_nomem(pStr->pCtx); + }else{ + assert( px.nBlobAlloc>0 ); + assert( !px.bReadOnly ); + sqlite3_result_blob(pStr->pCtx, px.aBlob, px.nBlob, SQLITE_DYNAMIC); + } +} -/* Mark node i of pParse as being a child of iParent. Call recursively -** to fill in all the descendants of node i. +/* The byte at index i is a node type-code. This routine +** determines the payload size for that node and writes that +** payload size in to *pSz. It returns the offset from i to the +** beginning of the payload. Return 0 on error. */ -static void jsonParseFillInParentage(JsonParse *pParse, u32 i, u32 iParent){ - JsonNode *pNode = &pParse->aNode[i]; - u32 j; - pParse->aUp[i] = iParent; - switch( pNode->eType ){ - case JSON_ARRAY: { - for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j)){ - jsonParseFillInParentage(pParse, i+j, i); +static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){ + u8 x; + u32 sz; + u32 n; + if( NEVER(i>pParse->nBlob) ){ + *pSz = 0; + return 0; + } + x = pParse->aBlob[i]>>4; + if( x<=11 ){ + sz = x; + n = 1; + }else if( x==12 ){ + if( i+1>=pParse->nBlob ){ + *pSz = 0; + return 0; + } + sz = pParse->aBlob[i+1]; + n = 2; + }else if( x==13 ){ + if( i+2>=pParse->nBlob ){ + *pSz = 0; + return 0; + } + sz = (pParse->aBlob[i+1]<<8) + pParse->aBlob[i+2]; + n = 3; + }else if( x==14 ){ + if( i+4>=pParse->nBlob ){ + *pSz = 0; + return 0; + } + sz = ((u32)pParse->aBlob[i+1]<<24) + (pParse->aBlob[i+2]<<16) + + (pParse->aBlob[i+3]<<8) + pParse->aBlob[i+4]; + n = 5; + }else{ + if( i+8>=pParse->nBlob + || pParse->aBlob[i+1]!=0 + || pParse->aBlob[i+2]!=0 + || pParse->aBlob[i+3]!=0 + || pParse->aBlob[i+4]!=0 + ){ + *pSz = 0; + return 0; + } + sz = (pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) + + (pParse->aBlob[i+7]<<8) + pParse->aBlob[i+8]; + n = 9; + } + if( (i64)i+sz+n > pParse->nBlob + && (i64)i+sz+n > pParse->nBlob-pParse->delta + ){ + sz = 0; + n = 0; + } + *pSz = sz; + return n; +} + + +/* +** Translate the binary JSONB representation of JSON beginning at +** pParse->aBlob[i] into a JSON text string. Append the JSON +** text onto the end of pOut. Return the index in pParse->aBlob[] +** of the first byte past the end of the element that is translated. +** +** If an error is detected in the BLOB input, the pOut->eErr flag +** might get set to JSTRING_MALFORMED. But not all BLOB input errors +** are detected. So a malformed JSONB input might either result +** in an error, or in incorrect JSON. +** +** The pOut->eErr JSTRING_OOM flag is set on a OOM. 
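+**
+** For example (illustrative): given the five-byte JSONB encoding of
+** [1,"a"] described earlier (0x4b 0x13 0x31 0x17 0x61), a call with
+** i==0 appends the text
+**
+**     [1,"a"]
+**
+** to pOut and returns 5.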
+*/
+static u32 jsonTranslateBlobToText(
+  const JsonParse *pParse,       /* the complete parse of the JSON */
+  u32 i,                         /* Start rendering at this index */
+  JsonString *pOut               /* Write JSON here */
+){
+  u32 sz, n, j, iEnd;
+
+  n = jsonbPayloadSize(pParse, i, &sz);
+  if( n==0 ){
+    pOut->eErr |= JSTRING_MALFORMED;
+    return pParse->nBlob+1;
+  }
+  switch( pParse->aBlob[i] & 0x0f ){
+    case JSONB_NULL: {
+      jsonAppendRawNZ(pOut, "null", 4);
+      return i+1;
+    }
+    case JSONB_TRUE: {
+      jsonAppendRawNZ(pOut, "true", 4);
+      return i+1;
+    }
+    case JSONB_FALSE: {
+      jsonAppendRawNZ(pOut, "false", 5);
+      return i+1;
+    }
+    case JSONB_INT:
+    case JSONB_FLOAT: {
+      if( sz==0 ) goto malformed_jsonb;
+      jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz);
+      break;
+    }
+    case JSONB_INT5: {  /* Integer literal in hexadecimal notation */
+      u32 k = 2;
+      sqlite3_uint64 u = 0;
+      const char *zIn = (const char*)&pParse->aBlob[i+n];
+      int bOverflow = 0;
+      if( sz==0 ) goto malformed_jsonb;
+      if( zIn[0]=='-' ){
+        jsonAppendChar(pOut, '-');
+        k++;
+      }else if( zIn[0]=='+' ){
+        k++;
+      }
+      for(; k<sz; k++){
+        if( !sqlite3Isxdigit(zIn[k]) ){
+          pOut->eErr |= JSTRING_MALFORMED;
+          break;
+        }else if( (u>>60)!=0 ){
+          bOverflow = 1;
+        }else{
+          u = u*16 + sqlite3HexToInt(zIn[k]);
+        }
+      }
+      jsonPrintf(100,pOut,bOverflow?"9.0e999":"%llu", u);
+      break;
+    }
+    case JSONB_FLOAT5: { /* Float literal missing digits beside "." */
+      u32 k = 0;
+      const char *zIn = (const char*)&pParse->aBlob[i+n];
+      if( sz==0 ) goto malformed_jsonb;
+      if( zIn[0]=='-' ){
+        jsonAppendChar(pOut, '-');
+        k++;
+      }
+      if( zIn[k]=='.' ){
+        jsonAppendChar(pOut, '0');
+      }
+      for(; k<sz; k++){
+        jsonAppendChar(pOut, zIn[k]);
+        if( zIn[k]=='.' && (k+1==sz || !sqlite3Isdigit(zIn[k+1])) ){
+          jsonAppendChar(pOut, '0');
+        }
+      }
+      break;
+    }
-      }
-      break;
-    }
-    case JSON_OBJECT: {
-      for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j+1)+1){
-        pParse->aUp[i+j] = i;
-        jsonParseFillInParentage(pParse, i+j+1, i);
+    case JSONB_TEXT:
+    case JSONB_TEXTJ: {
+      jsonAppendChar(pOut, '"');
+      jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz);
+      jsonAppendChar(pOut, '"');
+      break;
+    }
+    case JSONB_TEXT5: {
+      const char *zIn;
+      u32 k;
+      u32 sz2 = sz;
+      zIn = (const char*)&pParse->aBlob[i+n];
+      jsonAppendChar(pOut, '"');
+      while( sz2>0 ){
+        for(k=0; k<sz2 && (jsonIsOk[(u8)zIn[k]] || zIn[k]=='\''); k++){}
+        if( k>0 ){
+          jsonAppendRawNZ(pOut, zIn, k);
+          if( k>=sz2 ){
+            break;
+          }
+          zIn += k;
+          sz2 -= k;
+        }
+        if( zIn[0]=='"' ){
+          jsonAppendRawNZ(pOut, "\\\"", 2);
+          zIn++;
+          sz2--;
+          continue;
+        }
+        assert( zIn[0]=='\\' );
+        assert( sz2>=1 );
+        if( sz2<2 ){
+          pOut->eErr |= JSTRING_MALFORMED;
+          break;
+        }
+        switch( (u8)zIn[1] ){
+          case '\'':
+            jsonAppendChar(pOut, '\'');
+            break;
+          case 'v':
+            jsonAppendRawNZ(pOut, "\\u0009", 6);
+            break;
+          case 'x':
+            if( sz2<4 ){
+              pOut->eErr |= JSTRING_MALFORMED;
+              sz2 = 2;
+              break;
+            }
+            jsonAppendRawNZ(pOut, "\\u00", 4);
+            jsonAppendRawNZ(pOut, &zIn[2], 2);
+            zIn += 2;
+            sz2 -= 2;
+            break;
+          case '0':
+            jsonAppendRawNZ(pOut, "\\u0000", 6);
+            break;
+          case '\r':
+            if( sz2>2 && zIn[2]=='\n' ){
+              zIn++;
+              sz2--;
+            }
+            break;
+          case '\n':
+            break;
+          case 0xe2:
+            /* '\' followed by either U+2028 or U+2029 is ignored as
+            ** whitespace.  Note that in UTF8, U+2028 is 0xe2 0x80 0xa8.
+            ** U+2029 is the same except for the last byte */
+            if( sz2<4
+             || 0x80!=(u8)zIn[2]
+             || (0xa8!=(u8)zIn[3] && 0xa9!=(u8)zIn[3])
+            ){
+              pOut->eErr |= JSTRING_MALFORMED;
+              sz2 = 2;
+              break;
+            }
+            zIn += 2;
+            sz2 -= 2;
+            break;
+          default:
+            jsonAppendRawNZ(pOut, zIn, 2);
+            break;
+        }
+        assert( sz2>=2 );
+        zIn += 2;
+        sz2 -= 2;
      }
+      jsonAppendChar(pOut, '"');
+      break;
+    }
+    case JSONB_TEXTRAW: {
+      jsonAppendString(pOut, (const char*)&pParse->aBlob[i+n], sz);
+      break;
+    }
+    case JSONB_ARRAY: {
+      jsonAppendChar(pOut, '[');
+      j = i+n;
+      iEnd = j+sz;
+      while( j<iEnd && pOut->eErr==0 ){
+        j = jsonTranslateBlobToText(pParse, j, pOut);
+        jsonAppendChar(pOut, ',');
+      }
+      if( j>iEnd ) pOut->eErr |= JSTRING_MALFORMED;
+      if( sz>0 ) jsonStringTrimOneChar(pOut);
+      jsonAppendChar(pOut, ']');
+      break;
+    }
+    case JSONB_OBJECT: {
+      int x = 0;
+      jsonAppendChar(pOut, '{');
+      j = i+n;
+      iEnd = j+sz;
+      while( j<iEnd && pOut->eErr==0 ){
+        j = jsonTranslateBlobToText(pParse, j, pOut);
+        jsonAppendChar(pOut, (x++ & 1) ? ',' : ':');
+      }
+      if( (x & 1)!=0 || j>iEnd ) pOut->eErr |= JSTRING_MALFORMED;
+      if( sz>0 ) jsonStringTrimOneChar(pOut);
+      jsonAppendChar(pOut, '}');
      break;
    }
+    default: {
+      malformed_jsonb:
+      pOut->eErr |= JSTRING_MALFORMED;
      break;
    }
  }
+  return i+n+sz;
+}
+
+/* Return true if the input pJson might be a BLOB in the binary JSONB
+** format.
+**
+** For performance reasons, this routine does not do a detailed check of the
+** input BLOB to ensure that it is well-formed.  Hence, false positives are
+** possible.  False negatives should never occur, however.
+*/
+static int jsonFuncArgMightBeBinary(sqlite3_value *pJson){
+  u32 sz, n;
+  const u8 *aBlob;
+  int nBlob;
+  JsonParse s;
+  if( sqlite3_value_type(pJson)!=SQLITE_BLOB ) return 0;
+  aBlob = sqlite3_value_blob(pJson);
+  nBlob = sqlite3_value_bytes(pJson);
+  if( nBlob<1 ) return 0;
+  if( NEVER(aBlob==0) || (aBlob[0] & 0x0f)>JSONB_OBJECT ) return 0;
+  memset(&s, 0, sizeof(s));
+  s.aBlob = (u8*)aBlob;
+  s.nBlob = nBlob;
+  n = jsonbPayloadSize(&s, 0, &sz);
+  if( n==0 ) return 0;
+  if( sz+n!=(u32)nBlob ) return 0;
+  if( (aBlob[0] & 0x0f)<=JSONB_FALSE && sz>0 ) return 0;
+  return sz+n==(u32)nBlob;
+}
 
 /*
-** Compute the parentage of all nodes in a completed parse.
+** Given that a JSONB_ARRAY object starts at offset i, return
+** the number of entries in that array.
 */
-static int jsonParseFindParents(JsonParse *pParse){
-  u32 *aUp;
-  assert( pParse->aUp==0 );
-  aUp = pParse->aUp = sqlite3_malloc64( sizeof(u32)*pParse->nNode );
-  if( aUp==0 ){
-    pParse->oom = 1;
-    return SQLITE_NOMEM;
+static u32 jsonbArrayCount(JsonParse *pParse, u32 iRoot){
+  u32 n, sz, i, iEnd;
+  u32 k = 0;
+  n = jsonbPayloadSize(pParse, iRoot, &sz);
+  iEnd = iRoot+n+sz;
+  for(i=iRoot+n; n>0 && i<iEnd; i+=sz+n, k++){
+    n = jsonbPayloadSize(pParse, i, &sz);
  }
-  jsonParseFillInParentage(pParse, 0, 0);
-  return SQLITE_OK;
-}
+  return k;
+}
+
+/*
+** Edit the payload size of the element at iRoot by the amount in
+** pParse->delta.
+*/
-#define JSON_CACHE_ID  (-429938) /* First cache entry */
-#define JSON_CACHE_SZ  4        /* Max number of cache entries */
+static void jsonAfterEditSizeAdjust(JsonParse *pParse, u32 iRoot){
+  u32 sz = 0;
+  u32 nBlob;
+  assert( pParse->delta!=0 );
+  assert( pParse->nBlobAlloc >= pParse->nBlob );
+  nBlob = pParse->nBlob;
+  pParse->nBlob = pParse->nBlobAlloc;
+  (void)jsonbPayloadSize(pParse, iRoot, &sz);
+  pParse->nBlob = nBlob;
+  sz += pParse->delta;
+  pParse->delta += jsonBlobChangePayloadSize(pParse, iRoot, sz);
+}
 
 /*
-** Obtain a complete parse of the JSON found in the pJson argument
+** Modify the JSONB blob at pParse->aBlob by removing nDel bytes of
+** content beginning at iDel, and replacing them with nIns bytes of
+** content given by aIns.
 **
-** Use the sqlite3_get_auxdata() cache to find a preexisting parse
-** if it is available.  If the cache is not available or if it
-** is no longer valid, parse the JSON again and return the new parse.
-** Also register the new parse so that it will be available for
-** future sqlite3_get_auxdata() calls.
+** nDel may be zero, in which case no bytes are removed.  But iDel is
+** still important as new bytes will be inserted beginning at iDel.
 **
-** If an error occurs and pErrCtx!=0 then report the error on pErrCtx
-** and return NULL.
-**
-** The returned pointer (if it is not NULL) is owned by the cache in
-** most cases, not the caller.  The caller does NOT need to invoke
-** jsonParseFree(), in most cases.
-**
-** Except, if an error occurs and pErrCtx==0 then return the JsonParse
-** object with JsonParse.nErr non-zero and the caller will own the JsonParse
-** object.  In that case, it will be the responsibility of the caller to
-** invoke jsonParseFree().  To summarize:
-**
-**    pErrCtx!=0 || p->nErr==0      ==>   Return value p is owned by the
-**                                        cache.  Call does not need to
-**                                        free it.
-**
-**    pErrCtx==0 && p->nErr!=0      ==>   Return value is owned by the caller
-**                                        and so the caller must free it.
+** aIns may be zero, in which case space is created to hold nIns bytes
+** beginning at iDel, but that space is uninitialized.
 **
+** Set pParse->oom if an OOM occurs.
 */
-static JsonParse *jsonParseCached(
-  sqlite3_context *pCtx,     /* Context to use for cache search */
-  sqlite3_value *pJson,      /* Function param containing JSON text */
-  sqlite3_context *pErrCtx,  /* Write parse errors here if not NULL */
-  int bUnedited              /* No prior edits allowed */
+static void jsonBlobEdit(
+  JsonParse *pParse,     /* The JSONB to be modified is in pParse->aBlob */
+  u32 iDel,              /* First byte to be removed */
+  u32 nDel,              /* Number of bytes to remove */
+  const u8 *aIns,        /* Content to insert */
+  u32 nIns               /* Bytes of content to insert */
 ){
-  char *zJson = (char*)sqlite3_value_text(pJson);
-  int nJson = sqlite3_value_bytes(pJson);
-  JsonParse *p;
-  JsonParse *pMatch = 0;
-  int iKey;
-  int iMinKey = 0;
-  u32 iMinHold = 0xffffffff;
-  u32 iMaxHold = 0;
-  int bJsonRCStr;
+  i64 d = (i64)nIns - (i64)nDel;
+  if( d!=0 ){
+    if( pParse->nBlob + d > pParse->nBlobAlloc ){
+      jsonBlobExpand(pParse, pParse->nBlob+d);
+      if( pParse->oom ) return;
+    }
+    memmove(&pParse->aBlob[iDel+nIns],
+            &pParse->aBlob[iDel+nDel],
+            pParse->nBlob - (iDel+nDel));
+    pParse->nBlob += d;
+    pParse->delta += d;
+  }
+  if( nIns && aIns ) memcpy(&pParse->aBlob[iDel], aIns, nIns);
+}
 
-  if( zJson==0 ) return 0;
-  for(iKey=0; iKey<JSON_CACHE_SZ; iKey++){
-    p = (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iKey);
-    if( p==0 ){
-      iMinKey = iKey;
-      break;
-    }
+static u32 jsonBytesToBypass(const char *z, u32 n){
+  u32 i = 0;
+  while( i+1<n ){
+    if( z[i]!='\\' ) return i;
+    if( z[i+1]=='\n' ){
+      i += 2;
+      continue;
+    }
-    if( pMatch==0
-     && p->nJson==nJson
-     && (p->hasMod==0 || bUnedited==0)
-     && (p->zJson==zJson || memcmp(p->zJson,zJson,nJson)==0)
-    ){
-      p->nErr = 0;
-      p->useMod = 0;
-      pMatch = p;
-    }else
-    if( pMatch==0
-     && p->zAlt!=0
-     && bUnedited==0
-     && p->nAlt==nJson
-     && memcmp(p->zAlt, zJson, nJson)==0
-    ){
-      p->nErr = 0;
-      p->useMod = 1;
-      pMatch = p;
-    }else if( p->iHold<iMinHold ){
-      iMinHold = p->iHold;
-      iMinKey = iKey;
+    if( z[i+1]=='\r' ){
+      if( i+2<n && z[i+2]=='\n' ){
+        i += 3;
+      }else{
+        i += 2;
+      }
+      continue;
+    }
-    }
-    if( p->iHold>iMaxHold ){
-      iMaxHold = p->iHold;
+    if( 0xe2==(u8)z[i+1]
+     && i+3<n
+     && 0x80==(u8)z[i+2]
+     && (0xa8==(u8)z[i+3] || 0xa9==(u8)z[i+3])
+    ){
+      i += 4;
+      continue;
+    }
+    break;
-    }
-  }
-  if( pMatch ){
-    pMatch->nErr = 0;
-    pMatch->iHold = iMaxHold+1;
-    assert( pMatch->nJPRef>0 ); /* pMatch is owned by the cache */
-    return pMatch;
-  }
+  }
+  return i;
+}
-
-  /* The input JSON was not found anywhere in the cache.  We will need
-  ** to parse it ourselves and generate a new JsonParse object.
-  */
-  bJsonRCStr = sqlite3ValueIsOfClass(pJson,sqlite3RCStrUnref);
-  p = sqlite3_malloc64( sizeof(*p) + (bJsonRCStr ? 0 : nJson+1) );
-  if( p==0 ){
-    sqlite3_result_error_nomem(pCtx);
-    return 0;
+
+/*
+** Input z[0..n] defines a JSON escape sequence including the leading '\\'.
+** Decode that escape sequence into a single character. Write that +** character into *piOut. Return the number of bytes in the escape sequence. +** +** If there is a syntax error of some kind (for example too few characters +** after the '\\' to complete the encoding) then *piOut is set to +** JSON_INVALID_CHAR. +*/ +static u32 jsonUnescapeOneChar(const char *z, u32 n, u32 *piOut){ + assert( n>0 ); + assert( z[0]=='\\' ); + if( n<2 ){ + *piOut = JSON_INVALID_CHAR; + return n; } - memset(p, 0, sizeof(*p)); - if( bJsonRCStr ){ - p->zJson = sqlite3RCStrRef(zJson); - p->bJsonIsRCStr = 1; - }else{ - p->zJson = (char*)&p[1]; - memcpy(p->zJson, zJson, nJson+1); + switch( (u8)z[1] ){ + case 'u': { + u32 v, vlo; + if( n<6 ){ + *piOut = JSON_INVALID_CHAR; + return n; + } + v = jsonHexToInt4(&z[2]); + if( (v & 0xfc00)==0xd800 + && n>=12 + && z[6]=='\\' + && z[7]=='u' + && ((vlo = jsonHexToInt4(&z[8]))&0xfc00)==0xdc00 + ){ + *piOut = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000; + return 12; + }else{ + *piOut = v; + return 6; + } + } + case 'b': { *piOut = '\b'; return 2; } + case 'f': { *piOut = '\f'; return 2; } + case 'n': { *piOut = '\n'; return 2; } + case 'r': { *piOut = '\r'; return 2; } + case 't': { *piOut = '\t'; return 2; } + case 'v': { *piOut = '\v'; return 2; } + case '0': { *piOut = 0; return 2; } + case '\'': + case '"': + case '/': + case '\\':{ *piOut = z[1]; return 2; } + case 'x': { + if( n<4 ){ + *piOut = JSON_INVALID_CHAR; + return n; + } + *piOut = (jsonHexToInt(z[2])<<4) | jsonHexToInt(z[3]); + return 4; + } + case 0xe2: + case '\r': + case '\n': { + u32 nSkip = jsonBytesToBypass(z, n); + if( nSkip==0 ){ + *piOut = JSON_INVALID_CHAR; + return n; + }else if( nSkip==n ){ + *piOut = 0; + return n; + }else if( z[nSkip]=='\\' ){ + return nSkip + jsonUnescapeOneChar(&z[nSkip], n-nSkip, piOut); + }else{ + int sz = sqlite3Utf8ReadLimited((u8*)&z[nSkip], n-nSkip, piOut); + return nSkip + sz; + } + } + default: { + *piOut = JSON_INVALID_CHAR; + return 2; + } } - p->nJPRef = 1; - if( jsonParse(p, pErrCtx) ){ - if( pErrCtx==0 ){ - p->nErr = 1; - assert( p->nJPRef==1 ); /* Caller will own the new JsonParse object p */ - return p; +} + + +/* +** Compare two object labels. Return 1 if they are equal and +** 0 if they differ. +** +** In this version, we know that one or the other or both of the +** two comparands contains an escape sequence. 
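+**
+** For example (illustrative): the raw label "ab" and the escaped label
+** "\u0061b" compare equal here, because \u0061 decodes to 'a':
+**
+**     jsonLabelCompare("ab", 2, 1, "\\u0061b", 7, 0);   /* returns 1 */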
+*/ +static SQLITE_NOINLINE int jsonLabelCompareEscaped( + const char *zLeft, /* The left label */ + u32 nLeft, /* Size of the left label in bytes */ + int rawLeft, /* True if zLeft contains no escapes */ + const char *zRight, /* The right label */ + u32 nRight, /* Size of the right label in bytes */ + int rawRight /* True if zRight is escape-free */ +){ + u32 cLeft, cRight; + assert( rawLeft==0 || rawRight==0 ); + while( 1 /*exit-by-return*/ ){ + if( nLeft==0 ){ + cLeft = 0; + }else if( rawLeft || zLeft[0]!='\\' ){ + cLeft = ((u8*)zLeft)[0]; + if( cLeft>=0xc0 ){ + int sz = sqlite3Utf8ReadLimited((u8*)zLeft, nLeft, &cLeft); + zLeft += sz; + nLeft -= sz; + }else{ + zLeft++; + nLeft--; + } + }else{ + u32 n = jsonUnescapeOneChar(zLeft, nLeft, &cLeft); + zLeft += n; + assert( n<=nLeft ); + nLeft -= n; + } + if( nRight==0 ){ + cRight = 0; + }else if( rawRight || zRight[0]!='\\' ){ + cRight = ((u8*)zRight)[0]; + if( cRight>=0xc0 ){ + int sz = sqlite3Utf8ReadLimited((u8*)zRight, nRight, &cRight); + zRight += sz; + nRight -= sz; + }else{ + zRight++; + nRight--; + } + }else{ + u32 n = jsonUnescapeOneChar(zRight, nRight, &cRight); + zRight += n; + assert( n<=nRight ); + nRight -= n; } - jsonParseFree(p); - return 0; + if( cLeft!=cRight ) return 0; + if( cLeft==0 ) return 1; } - p->nJson = nJson; - p->iHold = iMaxHold+1; - /* Transfer ownership of the new JsonParse to the cache */ - sqlite3_set_auxdata(pCtx, JSON_CACHE_ID+iMinKey, p, - (void(*)(void*))jsonParseFree); - return (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iMinKey); } /* -** Compare the OBJECT label at pNode against zKey,nKey. Return true on -** a match. +** Compare two object labels. Return 1 if they are equal and +** 0 if they differ. Return -1 if an OOM occurs. */ -static int jsonLabelCompare(const JsonNode *pNode, const char *zKey, u32 nKey){ - assert( pNode->eU==1 ); - if( pNode->jnFlags & JNODE_RAW ){ - if( pNode->n!=nKey ) return 0; - return strncmp(pNode->u.zJContent, zKey, nKey)==0; +static int jsonLabelCompare( + const char *zLeft, /* The left label */ + u32 nLeft, /* Size of the left label in bytes */ + int rawLeft, /* True if zLeft contains no escapes */ + const char *zRight, /* The right label */ + u32 nRight, /* Size of the right label in bytes */ + int rawRight /* True if zRight is escape-free */ +){ + if( rawLeft && rawRight ){ + /* Simpliest case: Neither label contains escapes. A simple + ** memcmp() is sufficient. */ + if( nLeft!=nRight ) return 0; + return memcmp(zLeft, zRight, nLeft)==0; }else{ - if( pNode->n!=nKey+2 ) return 0; - return strncmp(pNode->u.zJContent+1, zKey, nKey)==0; + return jsonLabelCompareEscaped(zLeft, nLeft, rawLeft, + zRight, nRight, rawRight); } } -static int jsonSameLabel(const JsonNode *p1, const JsonNode *p2){ - if( p1->jnFlags & JNODE_RAW ){ - return jsonLabelCompare(p2, p1->u.zJContent, p1->n); - }else if( p2->jnFlags & JNODE_RAW ){ - return jsonLabelCompare(p1, p2->u.zJContent, p2->n); + +/* +** Error returns from jsonLookupStep() +*/ +#define JSON_LOOKUP_ERROR 0xffffffff +#define JSON_LOOKUP_NOTFOUND 0xfffffffe +#define JSON_LOOKUP_PATHERROR 0xfffffffd +#define JSON_LOOKUP_ISERROR(x) ((x)>=JSON_LOOKUP_PATHERROR) + +/* Forward declaration */ +static u32 jsonLookupStep(JsonParse*,u32,const char*,u32); + + +/* This helper routine for jsonLookupStep() populates pIns with +** binary data that is to be inserted into pParse. +** +** In the common case, pIns just points to pParse->aIns and pParse->nIns. 
+** But if the zPath of the original edit operation includes path elements +** that go deeper, additional substructure must be created. +** +** For example: +** +** json_insert('{}', '$.a.b.c', 123); +** +** The search stops at '$.a' But additional substructure must be +** created for the ".b.c" part of the patch so that the final result +** is: {"a":{"b":{"c"::123}}}. This routine populates pIns with +** the binary equivalent of {"b":{"c":123}} so that it can be inserted. +** +** The caller is responsible for resetting pIns when it has finished +** using the substructure. +*/ +static u32 jsonCreateEditSubstructure( + JsonParse *pParse, /* The original JSONB that is being edited */ + JsonParse *pIns, /* Populate this with the blob data to insert */ + const char *zTail /* Tail of the path that determins substructure */ +){ + static const u8 emptyObject[] = { JSONB_ARRAY, JSONB_OBJECT }; + int rc; + memset(pIns, 0, sizeof(*pIns)); + pIns->db = pParse->db; + if( zTail[0]==0 ){ + /* No substructure. Just insert what is given in pParse. */ + pIns->aBlob = pParse->aIns; + pIns->nBlob = pParse->nIns; + rc = 0; }else{ - return p1->n==p2->n && strncmp(p1->u.zJContent,p2->u.zJContent,p1->n)==0; + /* Construct the binary substructure */ + pIns->nBlob = 1; + pIns->aBlob = (u8*)&emptyObject[zTail[0]=='.']; + pIns->eEdit = pParse->eEdit; + pIns->nIns = pParse->nIns; + pIns->aIns = pParse->aIns; + rc = jsonLookupStep(pIns, 0, zTail, 0); + pParse->oom |= pIns->oom; } + return rc; /* Error code only */ } -/* forward declaration */ -static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**); - /* -** Search along zPath to find the node specified. Return a pointer -** to that node, or NULL if zPath is malformed or if there is no such -** node. +** Search along zPath to find the Json element specified. Return an +** index into pParse->aBlob[] for the start of that element's value. +** +** If the value found by this routine is the value half of label/value pair +** within an object, then set pPath->iLabel to the start of the corresponding +** label, before returning. ** -** If pApnd!=0, then try to append new nodes to complete zPath if it is -** possible to do so and if no existing node corresponds to zPath. If -** new nodes are appended *pApnd is set to 1. +** Return one of the JSON_LOOKUP error codes if problems are seen. +** +** This routine will also modify the blob. If pParse->eEdit is one of +** JEDIT_DEL, JEDIT_REPL, JEDIT_INS, or JEDIT_SET, then changes might be +** made to the selected value. If an edit is performed, then the return +** value does not necessarily point to the select element. If an edit +** is performed, the return value is only useful for detecting error +** conditions. 
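+**
+** For example (illustrative): with pParse holding the JSONB encoding of
+** {"a":[10,11]} and no edit requested, the call
+**
+**     jsonLookupStep(pParse, 0, ".a[1]", 0);
+**
+** returns the offset in pParse->aBlob[] of the JSONB_INT element for 11.
+** (The leading '$' of the user-supplied path has already been consumed
+** by the caller by the time this routine runs.)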
*/ -static JsonNode *jsonLookupStep( +static u32 jsonLookupStep( JsonParse *pParse, /* The JSON to search */ - u32 iRoot, /* Begin the search at this node */ + u32 iRoot, /* Begin the search at this element of aBlob[] */ const char *zPath, /* The path to search */ - int *pApnd, /* Append nodes to complete path if not NULL */ - const char **pzErr /* Make *pzErr point to any syntax error in zPath */ + u32 iLabel /* Label if iRoot is a value of in an object */ ){ - u32 i, j, nKey; + u32 i, j, k, nKey, sz, n, iEnd, rc; const char *zKey; - JsonNode *pRoot; - if( pParse->oom ) return 0; - pRoot = &pParse->aNode[iRoot]; - if( pRoot->jnFlags & (JNODE_REPLACE|JNODE_REMOVE) && pParse->useMod ){ - while( (pRoot->jnFlags & JNODE_REPLACE)!=0 ){ - u32 idx = (u32)(pRoot - pParse->aNode); - i = pParse->iSubst; - while( 1 /*exit-by-break*/ ){ - assert( inNode ); - assert( pParse->aNode[i].eType==JSON_SUBST ); - assert( pParse->aNode[i].eU==4 ); - assert( pParse->aNode[i].u.iPrevaNode[i].n==idx ){ - pRoot = &pParse->aNode[i+1]; - iRoot = i+1; - break; - } - i = pParse->aNode[i].u.iPrev; + u8 x; + + if( zPath[0]==0 ){ + if( pParse->eEdit && jsonBlobMakeEditable(pParse, pParse->nIns) ){ + n = jsonbPayloadSize(pParse, iRoot, &sz); + sz += n; + if( pParse->eEdit==JEDIT_DEL ){ + if( iLabel>0 ){ + sz += iRoot - iLabel; + iRoot = iLabel; + } + jsonBlobEdit(pParse, iRoot, sz, 0, 0); + }else if( pParse->eEdit==JEDIT_INS ){ + /* Already exists, so json_insert() is a no-op */ + }else{ + /* json_set() or json_replace() */ + jsonBlobEdit(pParse, iRoot, sz, pParse->aIns, pParse->nIns); } } - if( pRoot->jnFlags & JNODE_REMOVE ){ - return 0; - } + pParse->iLabel = iLabel; + return iRoot; } - if( zPath[0]==0 ) return pRoot; if( zPath[0]=='.' ){ - if( pRoot->eType!=JSON_OBJECT ) return 0; + int rawKey = 1; + x = pParse->aBlob[iRoot]; zPath++; if( zPath[0]=='"' ){ zKey = zPath + 1; @@ -204618,315 +205892,829 @@ static JsonNode *jsonLookupStep( if( zPath[i] ){ i++; }else{ - *pzErr = zPath; - return 0; + return JSON_LOOKUP_PATHERROR; } testcase( nKey==0 ); + rawKey = memchr(zKey, '\\', nKey)==0; }else{ zKey = zPath; for(i=0; zPath[i] && zPath[i]!='.' 
&& zPath[i]!='['; i++){} nKey = i; if( nKey==0 ){ - *pzErr = zPath; - return 0; - } - } - j = 1; - for(;;){ - while( j<=pRoot->n ){ - if( jsonLabelCompare(pRoot+j, zKey, nKey) ){ - return jsonLookupStep(pParse, iRoot+j+1, &zPath[i], pApnd, pzErr); - } - j++; - j += jsonNodeSize(&pRoot[j]); + return JSON_LOOKUP_PATHERROR; + } + } + if( (x & 0x0f)!=JSONB_OBJECT ) return JSON_LOOKUP_NOTFOUND; + n = jsonbPayloadSize(pParse, iRoot, &sz); + j = iRoot + n; /* j is the index of a label */ + iEnd = j+sz; + while( j<iEnd ){ + int rawLabel; + const char *zLabel; + x = pParse->aBlob[j] & 0x0f; + if( x<JSONB_TEXT || x>JSONB_TEXTRAW ) return JSON_LOOKUP_ERROR; + n = jsonbPayloadSize(pParse, j, &sz); + if( n==0 ) return JSON_LOOKUP_ERROR; + k = j+n; /* k is the index of the label text */ + if( k+sz>=iEnd ) return JSON_LOOKUP_ERROR; + zLabel = (const char*)&pParse->aBlob[k]; + rawLabel = x==JSONB_TEXT || x==JSONB_TEXTRAW; + if( jsonLabelCompare(zKey, nKey, rawKey, zLabel, sz, rawLabel) ){ + u32 v = k+sz; /* v is the index of the value */ + if( ((pParse->aBlob[v])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR; + n = jsonbPayloadSize(pParse, v, &sz); + if( n==0 || v+n+sz>iEnd ) return JSON_LOOKUP_ERROR; + assert( j>0 ); + rc = jsonLookupStep(pParse, v, &zPath[i], j); + if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot); + return rc; } - if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break; - if( pParse->useMod==0 ) break; - assert( pRoot->eU==2 ); - iRoot = pRoot->u.iAppend; - pRoot = &pParse->aNode[iRoot]; - j = 1; - } - if( pApnd ){ - u32 iStart, iLabel; - JsonNode *pNode; - assert( pParse->useMod ); - iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0); - iLabel = jsonParseAddNode(pParse, JSON_STRING, nKey, zKey); - zPath += i; - pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr); - if( pParse->oom ) return 0; - if( pNode ){ - pRoot = &pParse->aNode[iRoot]; - assert( pRoot->eU==0 ); - pRoot->u.iAppend = iStart; - pRoot->jnFlags |= JNODE_APPEND; - VVA( pRoot->eU = 2 ); - pParse->aNode[iLabel].jnFlags |= JNODE_RAW; - } - return pNode; + j = k+sz; + if( ((pParse->aBlob[j])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR; + n = jsonbPayloadSize(pParse, j, &sz); + if( n==0 ) return JSON_LOOKUP_ERROR; + j += n+sz; + } + if( j>iEnd ) return JSON_LOOKUP_ERROR; + if( pParse->eEdit>=JEDIT_INS ){ + u32 nIns; /* Total bytes to insert (label+value) */ + JsonParse v; /* BLOB encoding of the value to be inserted */ + JsonParse ix; /* Header of the label to be inserted */ + testcase( pParse->eEdit==JEDIT_INS ); + testcase( pParse->eEdit==JEDIT_SET ); + memset(&ix, 0, sizeof(ix)); + ix.db = pParse->db; + jsonBlobAppendNode(&ix, rawKey?JSONB_TEXTRAW:JSONB_TEXT5, nKey, 0); + pParse->oom |= ix.oom; + rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i]); + if( !JSON_LOOKUP_ISERROR(rc) + && jsonBlobMakeEditable(pParse, ix.nBlob+nKey+v.nBlob) + ){ + assert( !pParse->oom ); + nIns = ix.nBlob + nKey + v.nBlob; + jsonBlobEdit(pParse, j, 0, 0, nIns); + if( !pParse->oom ){ + assert( pParse->aBlob!=0 ); /* Because pParse->oom==0 */ + assert( ix.aBlob!=0 ); /* Because pParse->oom==0 */ + memcpy(&pParse->aBlob[j], ix.aBlob, ix.nBlob); + k = j + ix.nBlob; + memcpy(&pParse->aBlob[k], zKey, nKey); + k += nKey; + memcpy(&pParse->aBlob[k], v.aBlob, v.nBlob); + if( ALWAYS(pParse->delta) ) jsonAfterEditSizeAdjust(pParse, iRoot); + } + } + jsonParseReset(&v); + jsonParseReset(&ix); + return rc; } }else if( zPath[0]=='[' ){ - i = 0; - j = 1; - while( sqlite3Isdigit(zPath[j]) ){ - i = i*10 + zPath[j] - '0'; - j++; + x = pParse->aBlob[iRoot] & 0x0f; + if( x!=JSONB_ARRAY ) return JSON_LOOKUP_NOTFOUND; + n = 
jsonbPayloadSize(pParse, iRoot, &sz); + k = 0; + i = 1; + while( sqlite3Isdigit(zPath[i]) ){ + k = k*10 + zPath[i] - '0'; + i++; } - if( j<2 || zPath[j]!=']' ){ + if( i<2 || zPath[i]!=']' ){ if( zPath[1]=='#' ){ - JsonNode *pBase = pRoot; - int iBase = iRoot; - if( pRoot->eType!=JSON_ARRAY ) return 0; - for(;;){ - while( j<=pBase->n ){ - if( (pBase[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ) i++; - j += jsonNodeSize(&pBase[j]); - } - if( (pBase->jnFlags & JNODE_APPEND)==0 ) break; - if( pParse->useMod==0 ) break; - assert( pBase->eU==2 ); - iBase = pBase->u.iAppend; - pBase = &pParse->aNode[iBase]; - j = 1; - } - j = 2; + k = jsonbArrayCount(pParse, iRoot); + i = 2; if( zPath[2]=='-' && sqlite3Isdigit(zPath[3]) ){ - unsigned int x = 0; - j = 3; + unsigned int nn = 0; + i = 3; do{ - x = x*10 + zPath[j] - '0'; - j++; - }while( sqlite3Isdigit(zPath[j]) ); - if( x>i ) return 0; - i -= x; + nn = nn*10 + zPath[i] - '0'; + i++; + }while( sqlite3Isdigit(zPath[i]) ); + if( nn>k ) return JSON_LOOKUP_NOTFOUND; + k -= nn; } - if( zPath[j]!=']' ){ - *pzErr = zPath; - return 0; + if( zPath[i]!=']' ){ + return JSON_LOOKUP_PATHERROR; } }else{ - *pzErr = zPath; - return 0; + return JSON_LOOKUP_PATHERROR; } } - if( pRoot->eType!=JSON_ARRAY ) return 0; - zPath += j + 1; - j = 1; - for(;;){ - while( j<=pRoot->n - && (i>0 || ((pRoot[j].jnFlags & JNODE_REMOVE)!=0 && pParse->useMod)) + j = iRoot+n; + iEnd = j+sz; + while( j<iEnd ){ + if( k==0 ){ + rc = jsonLookupStep(pParse, j, &zPath[i+1], 0); + if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot); + return rc; + } + k--; + n = jsonbPayloadSize(pParse, j, &sz); + if( n==0 ) return JSON_LOOKUP_ERROR; + j += n+sz; + } + if( j>iEnd ) return JSON_LOOKUP_ERROR; + if( k>0 ) return JSON_LOOKUP_NOTFOUND; + if( pParse->eEdit>=JEDIT_INS ){ + JsonParse v; + testcase( pParse->eEdit==JEDIT_INS ); + testcase( pParse->eEdit==JEDIT_SET ); + rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i+1]); + if( !JSON_LOOKUP_ISERROR(rc) + && jsonBlobMakeEditable(pParse, v.nBlob) ){ - if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ) i--; - j += jsonNodeSize(&pRoot[j]); - } - if( i==0 && j<=pRoot->n ) break; - if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break; - if( pParse->useMod==0 ) break; - assert( pRoot->eU==2 ); - iRoot = pRoot->u.iAppend; - pRoot = &pParse->aNode[iRoot]; - j = 1; - } - if( j<=pRoot->n ){ - return jsonLookupStep(pParse, iRoot+j, zPath, pApnd, pzErr); - } - if( i==0 && pApnd ){ - u32 iStart; - JsonNode *pNode; - assert( pParse->useMod ); - iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0); - pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr); - if( pParse->oom ) return 0; - if( pNode ){ - pRoot = &pParse->aNode[iRoot]; - assert( pRoot->eU==0 ); - pRoot->u.iAppend = iStart; - pRoot->jnFlags |= JNODE_APPEND; - VVA( pRoot->eU = 2 ); + assert( !pParse->oom ); + jsonBlobEdit(pParse, j, 0, v.aBlob, v.nBlob); } - return pNode; + jsonParseReset(&v); + if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot); + return rc; } }else{ - *pzErr = zPath; + return JSON_LOOKUP_PATHERROR; } - return 0; + return JSON_LOOKUP_NOTFOUND; } /* -** Append content to pParse that will complete zPath. Return a pointer -** to the inserted node, or return NULL if the append fails. +** Convert a JSON BLOB into text and make that text the return value +** of an SQL function.
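+**
+** For example (assuming the json() and jsonb() SQL functions that
+** accompany this JSONB support), a call such as json(jsonb('{"a":1}'))
+** ultimately lands here: the JSONB blob built by jsonb() is translated
+** back into the text '{"a":1}', which becomes the SQL return value.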
*/ -static JsonNode *jsonLookupAppend( - JsonParse *pParse, /* Append content to the JSON parse */ - const char *zPath, /* Description of content to append */ - int *pApnd, /* Set this flag to 1 */ - const char **pzErr /* Make this point to any syntax error */ +static void jsonReturnTextJsonFromBlob( + sqlite3_context *ctx, + const u8 *aBlob, + u32 nBlob ){ - *pApnd = 1; - if( zPath[0]==0 ){ - jsonParseAddNode(pParse, JSON_NULL, 0, 0); - return pParse->oom ? 0 : &pParse->aNode[pParse->nNode-1]; - } - if( zPath[0]=='.' ){ - jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); - }else if( strncmp(zPath,"[0]",3)==0 ){ - jsonParseAddNode(pParse, JSON_ARRAY, 0, 0); - }else{ - return 0; - } - if( pParse->oom ) return 0; - return jsonLookupStep(pParse, pParse->nNode-1, zPath, pApnd, pzErr); + JsonParse x; + JsonString s; + + if( NEVER(aBlob==0) ) return; + memset(&x, 0, sizeof(x)); + x.aBlob = (u8*)aBlob; + x.nBlob = nBlob; + jsonStringInit(&s, ctx); + jsonTranslateBlobToText(&x, 0, &s); + jsonReturnString(&s, 0, 0); } + /* -** Return the text of a syntax error message on a JSON path. Space is -** obtained from sqlite3_malloc(). +** Return the value of the BLOB node at index i. +** +** If the value is a primitive, return it as an SQL value. +** If the value is an array or object, return it as either +** JSON text or the BLOB encoding, depending on the JSON_B flag +** on the userdata. */ -static char *jsonPathSyntaxError(const char *zErr){ - return sqlite3_mprintf("JSON path error near '%q'", zErr); +static void jsonReturnFromBlob( + JsonParse *pParse, /* Complete JSON parse tree */ + u32 i, /* Index of the node */ + sqlite3_context *pCtx, /* Return value for this function */ + int textOnly /* return text JSON. Disregard user-data */ +){ + u32 n, sz; + int rc; + sqlite3 *db = sqlite3_context_db_handle(pCtx); + + n = jsonbPayloadSize(pParse, i, &sz); + if( n==0 ){ + sqlite3_result_error(pCtx, "malformed JSON", -1); + return; + } + switch( pParse->aBlob[i] & 0x0f ){ + case JSONB_NULL: { + if( sz ) goto returnfromblob_malformed; + sqlite3_result_null(pCtx); + break; + } + case JSONB_TRUE: { + if( sz ) goto returnfromblob_malformed; + sqlite3_result_int(pCtx, 1); + break; + } + case JSONB_FALSE: { + if( sz ) goto returnfromblob_malformed; + sqlite3_result_int(pCtx, 0); + break; + } + case JSONB_INT5: + case JSONB_INT: { + sqlite3_int64 iRes = 0; + char *z; + int bNeg = 0; + char x; + if( sz==0 ) goto returnfromblob_malformed; + x = (char)pParse->aBlob[i+n]; + if( x=='-' ){ + if( sz<2 ) goto returnfromblob_malformed; + n++; + sz--; + bNeg = 1; + } + z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz); + if( z==0 ) goto returnfromblob_oom; + rc = sqlite3DecOrHexToI64(z, &iRes); + sqlite3DbFree(db, z); + if( rc==0 ){ + sqlite3_result_int64(pCtx, bNeg ? 
-iRes : iRes); + }else if( rc==3 && bNeg ){ + sqlite3_result_int64(pCtx, SMALLEST_INT64); + }else if( rc==1 ){ + goto returnfromblob_malformed; + }else{ + if( bNeg ){ n--; sz++; } + goto to_double; + } + break; + } + case JSONB_FLOAT5: + case JSONB_FLOAT: { + double r; + char *z; + if( sz==0 ) goto returnfromblob_malformed; + to_double: + z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz); + if( z==0 ) goto returnfromblob_oom; + rc = sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8); + sqlite3DbFree(db, z); + if( rc<=0 ) goto returnfromblob_malformed; + sqlite3_result_double(pCtx, r); + break; + } + case JSONB_TEXTRAW: + case JSONB_TEXT: { + sqlite3_result_text(pCtx, (char*)&pParse->aBlob[i+n], sz, + SQLITE_TRANSIENT); + break; + } + case JSONB_TEXT5: + case JSONB_TEXTJ: { + /* Translate JSON formatted string into raw text */ + u32 iIn, iOut; + const char *z; + char *zOut; + u32 nOut = sz; + z = (const char*)&pParse->aBlob[i+n]; + zOut = sqlite3DbMallocRaw(db, nOut+1); + if( zOut==0 ) goto returnfromblob_oom; + for(iIn=iOut=0; iIn<sz; iIn++){ + char c = z[iIn]; + if( c=='\\' ){ + u32 v; + u32 szEscape = jsonUnescapeOneChar(&z[iIn], sz-iIn, &v); + if( v<=0x7f ){ + zOut[iOut++] = (char)v; + }else if( v<=0x7ff ){ + assert( szEscape>=2 ); + zOut[iOut++] = (char)(0xc0 | (v>>6)); + zOut[iOut++] = 0x80 | (v&0x3f); + }else if( v<0x10000 ){ + assert( szEscape>=3 ); + zOut[iOut++] = 0xe0 | (v>>12); + zOut[iOut++] = 0x80 | ((v>>6)&0x3f); + zOut[iOut++] = 0x80 | (v&0x3f); + }else if( v==JSON_INVALID_CHAR ){ + /* Silently ignore illegal unicode */ + }else{ + assert( szEscape>=4 ); + zOut[iOut++] = 0xf0 | (v>>18); + zOut[iOut++] = 0x80 | ((v>>12)&0x3f); + zOut[iOut++] = 0x80 | ((v>>6)&0x3f); + zOut[iOut++] = 0x80 | (v&0x3f); + } + iIn += szEscape - 1; + }else{ + zOut[iOut++] = c; + } + } /* end for() */ + assert( iOut<=nOut ); + zOut[iOut] = 0; + sqlite3_result_text(pCtx, zOut, iOut, SQLITE_DYNAMIC); + break; + } + case JSONB_ARRAY: + case JSONB_OBJECT: { + int flags = textOnly ? 0 : SQLITE_PTR_TO_INT(sqlite3_user_data(pCtx)); + if( flags & JSON_BLOB ){ + sqlite3_result_blob(pCtx, &pParse->aBlob[i], sz+n, SQLITE_TRANSIENT); + }else{ + jsonReturnTextJsonFromBlob(pCtx, &pParse->aBlob[i], sz+n); + } + break; + } + default: { + goto returnfromblob_malformed; + } + } + return; + +returnfromblob_oom: + sqlite3_result_error_nomem(pCtx); + return; + +returnfromblob_malformed: + sqlite3_result_error(pCtx, "malformed JSON", -1); + return; } /* -** Do a node lookup using zPath. Return a pointer to the node on success. -** Return NULL if not found or if there is an error. +** pArg is a function argument that might be an SQL value or a JSON +** value. Figure out what it is and encode it as a JSONB blob. +** Return the results in pParse. ** -** On an error, write an error message into pCtx and increment the -** pParse->nErr counter. +** pParse is uninitialized upon entry. This routine will handle the +** initialization of pParse. The result will be contained in +** pParse->aBlob and pParse->nBlob. pParse->aBlob might be dynamically +** allocated (if pParse->nBlobAlloc is greater than zero) in which case +** the caller is responsible for freeing the space allocated to pParse->aBlob +** when it has finished with it. Or pParse->aBlob might be a static string +** or a value obtained from sqlite3_value_blob(pArg). ** -** If pApnd!=NULL then try to append missing nodes and set *pApnd = 1 if -** nodes are appended. +** If the argument is a BLOB that is clearly not a JSONB, then this +** function might set an error message in ctx and return non-zero. +** It might also set an error message and return non-zero on an OOM error.
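+**
+** For example, in json_array(1, 2.5, 'three') each argument passes
+** through this routine: 1 is encoded as a JSONB_INT node, 2.5 as a
+** JSONB_FLOAT node, and 'three' (ordinary SQL text without the JSON
+** subtype) as a JSONB_TEXTRAW node. A BLOB argument that does not
+** appear to be JSONB draws the "JSON cannot hold BLOB values" error.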
*/ -static JsonNode *jsonLookup( - JsonParse *pParse, /* The JSON to search */ - const char *zPath, /* The path to search */ - int *pApnd, /* Append nodes to complete path if not NULL */ - sqlite3_context *pCtx /* Report errors here, if not NULL */ -){ - const char *zErr = 0; - JsonNode *pNode = 0; - char *zMsg; - - if( zPath==0 ) return 0; - if( zPath[0]!='$' ){ - zErr = zPath; - goto lookup_err; +static int jsonFunctionArgToBlob( + sqlite3_context *ctx, + sqlite3_value *pArg, + JsonParse *pParse +){ + int eType = sqlite3_value_type(pArg); + static u8 aNull[] = { 0x00 }; + memset(pParse, 0, sizeof(pParse[0])); + pParse->db = sqlite3_context_db_handle(ctx); + switch( eType ){ + default: { + pParse->aBlob = aNull; + pParse->nBlob = 1; + return 0; + } + case SQLITE_BLOB: { + if( jsonFuncArgMightBeBinary(pArg) ){ + pParse->aBlob = (u8*)sqlite3_value_blob(pArg); + pParse->nBlob = sqlite3_value_bytes(pArg); + }else{ + sqlite3_result_error(ctx, "JSON cannot hold BLOB values", -1); + return 1; + } + break; + } + case SQLITE_TEXT: { + const char *zJson = (const char*)sqlite3_value_text(pArg); + int nJson = sqlite3_value_bytes(pArg); + if( zJson==0 ) return 1; + if( sqlite3_value_subtype(pArg)==JSON_SUBTYPE ){ + pParse->zJson = (char*)zJson; + pParse->nJson = nJson; + if( jsonConvertTextToBlob(pParse, ctx) ){ + sqlite3_result_error(ctx, "malformed JSON", -1); + sqlite3DbFree(pParse->db, pParse->aBlob); + memset(pParse, 0, sizeof(pParse[0])); + return 1; + } + }else{ + jsonBlobAppendNode(pParse, JSONB_TEXTRAW, nJson, zJson); + } + break; + } + case SQLITE_FLOAT: { + double r = sqlite3_value_double(pArg); + if( NEVER(sqlite3IsNaN(r)) ){ + jsonBlobAppendNode(pParse, JSONB_NULL, 0, 0); + }else{ + int n = sqlite3_value_bytes(pArg); + const char *z = (const char*)sqlite3_value_text(pArg); + if( z==0 ) return 1; + if( z[0]=='I' ){ + jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999"); + }else if( z[0]=='-' && z[1]=='I' ){ + jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999"); + }else{ + jsonBlobAppendNode(pParse, JSONB_FLOAT, n, z); + } + } + break; + } + case SQLITE_INTEGER: { + int n = sqlite3_value_bytes(pArg); + const char *z = (const char*)sqlite3_value_text(pArg); + if( z==0 ) return 1; + jsonBlobAppendNode(pParse, JSONB_INT, n, z); + break; + } + } + if( pParse->oom ){ + sqlite3_result_error_nomem(ctx); + return 1; + }else{ + return 0; } - zPath++; - pNode = jsonLookupStep(pParse, 0, zPath, pApnd, &zErr); - if( zErr==0 ) return pNode; +} -lookup_err: - pParse->nErr++; - assert( zErr!=0 && pCtx!=0 ); - zMsg = jsonPathSyntaxError(zErr); +/* +** Generate a bad path error. +** +** If ctx is not NULL then push the error message into ctx and return NULL. +** If ctx is NULL, then return the text of the error message. +*/ +static char *jsonBadPathError( + sqlite3_context *ctx, /* The function call containing the error */ + const char *zPath /* The path with the problem */ +){ + char *zMsg = sqlite3_mprintf("bad JSON path: %Q", zPath); + if( ctx==0 ) return zMsg; if( zMsg ){ - sqlite3_result_error(pCtx, zMsg, -1); + sqlite3_result_error(ctx, zMsg, -1); sqlite3_free(zMsg); }else{ - sqlite3_result_error_nomem(pCtx); + sqlite3_result_error_nomem(ctx); } return 0; } +/* argv[0] is a BLOB that seems likely to be a JSONB. Subsequent +** arguments come in pairs, where each pair contains a JSON path and +** content to insert or set at that path. Do the updates +** and return the result. +** +** The specific operation is determined by eEdit, which can be one +** of JEDIT_INS, JEDIT_REPL, or JEDIT_SET.
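+**
+** For example, with the target x = '{"a":1}' (a sketch of the usual
+** semantics of the three SQL functions built on this routine):
+**
+**     json_insert(x, '$.a', 2)   ->  {"a":1}         (exists: no-op)
+**     json_replace(x, '$.b', 2)  ->  {"a":1}         (missing: no-op)
+**     json_set(x, '$.b', 2)      ->  {"a":1,"b":2}   (missing: insert)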
+*/ +static void jsonInsertIntoBlob( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv, + int eEdit /* JEDIT_INS, JEDIT_REPL, or JEDIT_SET */ +){ + int i; + u32 rc = 0; + const char *zPath = 0; + int flgs; + JsonParse *p; + JsonParse ax; + + assert( (argc&1)==1 ); + flgs = argc==1 ? 0 : JSON_EDITABLE; + p = jsonParseFuncArg(ctx, argv[0], flgs); + if( p==0 ) return; + for(i=1; i<argc-1; i+=2){ + if( jsonFunctionArgToBlob(ctx, argv[i+1], &ax) ){ + jsonParseFree(p); + return; + } + zPath = (const char*)sqlite3_value_text(argv[i]); + if( zPath==0 ){ + sqlite3_result_error_nomem(ctx); + jsonParseFree(p); + jsonParseReset(&ax); + return; + } + if( zPath[0]!='$' ) goto jsonInsertIntoBlob_patherror; + if( zPath[1]==0 ){ + if( eEdit==JEDIT_REPL || eEdit==JEDIT_SET ){ + jsonBlobEdit(p, 0, p->nBlob, ax.aBlob, ax.nBlob); + } + rc = 0; + }else{ + p->eEdit = eEdit; + p->nIns = ax.nBlob; + p->aIns = ax.aBlob; + p->delta = 0; + rc = jsonLookupStep(p, 0, zPath+1, 0); + } + jsonParseReset(&ax); + if( rc==JSON_LOOKUP_NOTFOUND ) continue; + if( JSON_LOOKUP_ISERROR(rc) ) goto jsonInsertIntoBlob_patherror; + } + jsonReturnParse(ctx, p); + jsonParseFree(p); + return; + +jsonInsertIntoBlob_patherror: + jsonParseFree(p); + if( rc==JSON_LOOKUP_ERROR ){ + sqlite3_result_error(ctx, "malformed JSON", -1); + }else{ + jsonBadPathError(ctx, zPath); + } + return; +} + +/* +** If pArg is a blob that seems like a JSONB blob, then initialize +** p to point to that JSONB and return TRUE. If pArg does not seem like +** a JSONB blob, then return FALSE. +** +** This routine is only called if it is already known that pArg is a +** blob. The only open question is whether or not the blob appears +** to be a JSONB blob. +*/ +static int jsonArgIsJsonb(sqlite3_value *pArg, JsonParse *p){ + u32 n, sz = 0; + p->aBlob = (u8*)sqlite3_value_blob(pArg); + p->nBlob = (u32)sqlite3_value_bytes(pArg); + if( p->nBlob==0 ){ + p->aBlob = 0; + return 0; + } + if( NEVER(p->aBlob==0) ){ + return 0; + } + if( (p->aBlob[0] & 0x0f)<=JSONB_OBJECT + && (n = jsonbPayloadSize(p, 0, &sz))>0 + && sz+n==p->nBlob + && ((p->aBlob[0] & 0x0f)>JSONB_FALSE || sz==0) + ){ + return 1; + } + p->aBlob = 0; + p->nBlob = 0; + return 0; +} /* -** Report the wrong number of arguments for json_insert(), json_replace() -** or json_set(). +** Generate a JsonParse object, containing valid JSONB in aBlob and nBlob, +** from the SQL function argument pArg. Return a pointer to the new +** JsonParse object. +** +** Ownership of the new JsonParse object is passed to the caller. The +** caller should invoke jsonParseFree() on the return value when it +** has finished using it. +** +** If any errors are detected, an appropriate error message is set +** using sqlite3_result_error() or the equivalent and this routine +** returns NULL. This routine also returns NULL if the pArg argument +** is an SQL NULL value, but no error message is set in that case. This +** is so that SQL functions that are given NULL arguments will return +** a NULL value.
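+**
+** For example, json_type(NULL) and json_extract(NULL,'$') both yield
+** SQL NULL rather than an error, because this routine returns NULL,
+** with no error message, for an SQL NULL argument.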
*/ -static void jsonWrongNumArgs( - sqlite3_context *pCtx, - const char *zFuncName +static JsonParse *jsonParseFuncArg( + sqlite3_context *ctx, + sqlite3_value *pArg, + u32 flgs ){ - char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments", - zFuncName); - sqlite3_result_error(pCtx, zMsg, -1); - sqlite3_free(zMsg); + int eType; /* Datatype of pArg */ + JsonParse *p = 0; /* Value to be returned */ + JsonParse *pFromCache = 0; /* Value taken from cache */ + sqlite3 *db; /* The database connection */ + + assert( ctx!=0 ); + eType = sqlite3_value_type(pArg); + if( eType==SQLITE_NULL ){ + return 0; + } + pFromCache = jsonCacheSearch(ctx, pArg); + if( pFromCache ){ + pFromCache->nJPRef++; + if( (flgs & JSON_EDITABLE)==0 ){ + return pFromCache; + } + } + db = sqlite3_context_db_handle(ctx); +rebuild_from_cache: + p = sqlite3DbMallocZero(db, sizeof(*p)); + if( p==0 ) goto json_pfa_oom; + memset(p, 0, sizeof(*p)); + p->db = db; + p->nJPRef = 1; + if( pFromCache!=0 ){ + u32 nBlob = pFromCache->nBlob; + p->aBlob = sqlite3DbMallocRaw(db, nBlob); + if( p->aBlob==0 ) goto json_pfa_oom; + memcpy(p->aBlob, pFromCache->aBlob, nBlob); + p->nBlobAlloc = p->nBlob = nBlob; + p->hasNonstd = pFromCache->hasNonstd; + jsonParseFree(pFromCache); + return p; + } + if( eType==SQLITE_BLOB ){ + if( jsonArgIsJsonb(pArg,p) ){ + if( (flgs & JSON_EDITABLE)!=0 && jsonBlobMakeEditable(p, 0)==0 ){ + goto json_pfa_oom; + } + return p; + } + /* If the blob is not valid JSONB, fall through into trying to cast + ** the blob into text which is then interpreted as JSON. (tag-20240123-a) + ** + ** This goes against all historical documentation about how the SQLite + ** JSON functions were supposed to work. From the beginning, blob was + ** reserved for expansion and a blob value should have raised an error. + ** But it did not, due to a bug. And many applications came to depend + ** upon this buggy behavior, especially when using the CLI and reading + ** JSON text using readfile(), which returns a blob. For this reason + ** we will continue to support the bug moving forward. + ** See for example https://sqlite.org/forum/forumpost/012136abd5292b8d + */ + } + p->zJson = (char*)sqlite3_value_text(pArg); + p->nJson = sqlite3_value_bytes(pArg); + if( p->nJson==0 ) goto json_pfa_malformed; + if( NEVER(p->zJson==0) ) goto json_pfa_oom; + if( jsonConvertTextToBlob(p, (flgs & JSON_KEEPERROR) ? 0 : ctx) ){ + if( flgs & JSON_KEEPERROR ){ + p->nErr = 1; + return p; + }else{ + jsonParseFree(p); + return 0; + } + }else{ + int isRCStr = sqlite3ValueIsOfClass(pArg, sqlite3RCStrUnref); + int rc; + if( !isRCStr ){ + char *zNew = sqlite3RCStrNew( p->nJson ); + if( zNew==0 ) goto json_pfa_oom; + memcpy(zNew, p->zJson, p->nJson); + p->zJson = zNew; + p->zJson[p->nJson] = 0; + }else{ + sqlite3RCStrRef(p->zJson); + } + p->bJsonIsRCStr = 1; + rc = jsonCacheInsert(ctx, p); + if( rc==SQLITE_NOMEM ) goto json_pfa_oom; + if( flgs & JSON_EDITABLE ){ + pFromCache = p; + p = 0; + goto rebuild_from_cache; + } + } + return p; + +json_pfa_malformed: + if( flgs & JSON_KEEPERROR ){ + p->nErr = 1; + return p; + }else{ + jsonParseFree(p); + sqlite3_result_error(ctx, "malformed JSON", -1); + return 0; + } + +json_pfa_oom: + jsonParseFree(pFromCache); + jsonParseFree(p); + sqlite3_result_error_nomem(ctx); + return 0; } /* -** Mark all NULL entries in the Object passed in as JNODE_REMOVE. +** Make the return value of a JSON function either the raw JSONB blob +** or make it JSON text, depending on whether the JSON_BLOB flag is +** set on the function.
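+**
+** For example (assuming the jsonb_-prefixed variants are registered
+** with JSON_BLOB set in their user-data): json_set('{}','$.a',1)
+** returns the text '{"a":1}', while jsonb_set('{}','$.a',1) returns
+** the equivalent JSONB blob.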
*/ -static void jsonRemoveAllNulls(JsonNode *pNode){ - int i, n; - assert( pNode->eType==JSON_OBJECT ); - n = pNode->n; - for(i=2; i<=n; i += jsonNodeSize(&pNode[i])+1){ - switch( pNode[i].eType ){ - case JSON_NULL: - pNode[i].jnFlags |= JNODE_REMOVE; - break; - case JSON_OBJECT: - jsonRemoveAllNulls(&pNode[i]); - break; +static void jsonReturnParse( + sqlite3_context *ctx, + JsonParse *p +){ + int flgs; + if( p->oom ){ + sqlite3_result_error_nomem(ctx); + return; + } + flgs = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + if( flgs & JSON_BLOB ){ + if( p->nBlobAlloc>0 && !p->bReadOnly ){ + sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_DYNAMIC); + p->nBlobAlloc = 0; + }else{ + sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_TRANSIENT); } + }else{ + JsonString s; + jsonStringInit(&s, ctx); + p->delta = 0; + jsonTranslateBlobToText(p, 0, &s); + jsonReturnString(&s, p, ctx); + sqlite3_result_subtype(ctx, JSON_SUBTYPE); } } - /**************************************************************************** ** SQL functions used for testing and debugging ****************************************************************************/ #if SQLITE_DEBUG /* -** Print N node entries. -*/ -static void jsonDebugPrintNodeEntries( - JsonNode *aNode, /* First node entry to print */ - int N /* Number of node entries to print */ -){ - int i; - for(i=0; iaBlob[iStart] & 0x0f; + u32 savedNBlob = pParse->nBlob; + sqlite3_str_appendf(pOut, "%5d:%*s", iStart, nIndent, ""); + if( pParse->nBlobAlloc>pParse->nBlob ){ + pParse->nBlob = pParse->nBlobAlloc; + } + nn = n = jsonbPayloadSize(pParse, iStart, &sz); + if( nn==0 ) nn = 1; + if( sz>0 && xaBlob[iStart+i]); + } + if( n==0 ){ + sqlite3_str_appendf(pOut, " ERROR invalid node size\n"); + iStart = n==0 ? iStart+1 : iEnd; + continue; } - printf("node %4u: %-7s n=%-5d", i, zType, aNode[i].n); - if( (aNode[i].jnFlags & ~JNODE_LABEL)!=0 ){ - u8 f = aNode[i].jnFlags; - if( f & JNODE_RAW ) printf(" RAW"); - if( f & JNODE_ESCAPE ) printf(" ESCAPE"); - if( f & JNODE_REMOVE ) printf(" REMOVE"); - if( f & JNODE_REPLACE ) printf(" REPLACE"); - if( f & JNODE_APPEND ) printf(" APPEND"); - if( f & JNODE_JSON5 ) printf(" JSON5"); + pParse->nBlob = savedNBlob; + if( iStart+n+sz>iEnd ){ + iEnd = iStart+n+sz; + if( iEnd>pParse->nBlob ){ + if( pParse->nBlobAlloc>0 && iEnd>pParse->nBlobAlloc ){ + iEnd = pParse->nBlobAlloc; + }else{ + iEnd = pParse->nBlob; + } + } } - switch( aNode[i].eU ){ - case 1: printf(" zJContent=[%.*s]\n", - aNode[i].n, aNode[i].u.zJContent); break; - case 2: printf(" iAppend=%u\n", aNode[i].u.iAppend); break; - case 3: printf(" iKey=%u\n", aNode[i].u.iKey); break; - case 4: printf(" iPrev=%u\n", aNode[i].u.iPrev); break; - default: printf("\n"); + sqlite3_str_appendall(pOut," <-- "); + switch( x ){ + case JSONB_NULL: sqlite3_str_appendall(pOut,"null"); break; + case JSONB_TRUE: sqlite3_str_appendall(pOut,"true"); break; + case JSONB_FALSE: sqlite3_str_appendall(pOut,"false"); break; + case JSONB_INT: sqlite3_str_appendall(pOut,"int"); break; + case JSONB_INT5: sqlite3_str_appendall(pOut,"int5"); break; + case JSONB_FLOAT: sqlite3_str_appendall(pOut,"float"); break; + case JSONB_FLOAT5: sqlite3_str_appendall(pOut,"float5"); break; + case JSONB_TEXT: sqlite3_str_appendall(pOut,"text"); break; + case JSONB_TEXTJ: sqlite3_str_appendall(pOut,"textj"); break; + case JSONB_TEXT5: sqlite3_str_appendall(pOut,"text5"); break; + case JSONB_TEXTRAW: sqlite3_str_appendall(pOut,"textraw"); break; + case JSONB_ARRAY: { + sqlite3_str_appendf(pOut,"array, %u bytes\n", sz); + 
jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut); + showContent = 0; + break; + } + case JSONB_OBJECT: { + sqlite3_str_appendf(pOut, "object, %u bytes\n", sz); + jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut); + showContent = 0; + break; + } + default: { + sqlite3_str_appendall(pOut, "ERROR: unknown node type\n"); + showContent = 0; + break; + } + } + if( showContent ){ + if( sz==0 && x<=JSONB_FALSE ){ + sqlite3_str_append(pOut, "\n", 1); + }else{ + u32 i; + sqlite3_str_appendall(pOut, ": \""); + for(i=iStart+n; i<iStart+n+sz; i++){ + u8 c = pParse->aBlob[i]; + if( c<0x20 || c>=0x7f ) c = '.'; + sqlite3_str_append(pOut, (char*)&c, 1); + } + sqlite3_str_append(pOut, "\"\n", 2); + } + } + iStart += n + sz; } } -#endif /* SQLITE_DEBUG */ - - -#if 0 /* 1 for debugging. 0 normally. Requires -DSQLITE_DEBUG too */ -static void jsonDebugPrintParse(JsonParse *p){ - jsonDebugPrintNodeEntries(p->aNode, p->nNode); } -static void jsonDebugPrintNode(JsonNode *pNode){ - jsonDebugPrintNodeEntries(pNode, jsonNodeSize(pNode)); +static void jsonShowParse(JsonParse *pParse){ + sqlite3_str out; + char zBuf[1000]; + if( pParse==0 ){ + printf("NULL pointer\n"); + return; + }else{ + printf("nBlobAlloc = %u\n", pParse->nBlobAlloc); + printf("nBlob = %u\n", pParse->nBlob); + printf("delta = %d\n", pParse->delta); + if( pParse->nBlob==0 ) return; + printf("content (bytes 0..%u):\n", pParse->nBlob-1); + } + sqlite3StrAccumInit(&out, 0, zBuf, sizeof(zBuf), 1000000); + jsonDebugPrintBlob(pParse, 0, pParse->nBlob, 0, &out); + printf("%s", sqlite3_str_value(&out)); + sqlite3_str_reset(&out); } -#else - /* The usual case */ -# define jsonDebugPrintNode(X) -# define jsonDebugPrintParse(X) -#endif +#endif /* SQLITE_DEBUG */ #ifdef SQLITE_DEBUG /* ** SQL function: json_parse(JSON) ** -** Parse JSON using jsonParseCached(). Then print a dump of that -** parse on standard output. Return the mimified JSON result, just -** like the json() function. +** Parse JSON using jsonParseFuncArg(). Return text that is a +** human-readable dump of the binary JSONB for the input parameter. */ static void jsonParseFunc( sqlite3_context *ctx, @@ -204934,38 +206722,19 @@ static void jsonParseFunc( sqlite3_value **argv ){ JsonParse *p; /* The parse */ + sqlite3_str out; - assert( argc==1 ); - p = jsonParseCached(ctx, argv[0], ctx, 0); + assert( argc>=1 ); + sqlite3StrAccumInit(&out, 0, 0, 0, 1000000); + p = jsonParseFuncArg(ctx, argv[0], 0); if( p==0 ) return; - printf("nNode = %u\n", p->nNode); - printf("nAlloc = %u\n", p->nAlloc); - printf("nJson = %d\n", p->nJson); - printf("nAlt = %d\n", p->nAlt); - printf("nErr = %u\n", p->nErr); - printf("oom = %u\n", p->oom); - printf("hasNonstd = %u\n", p->hasNonstd); - printf("useMod = %u\n", p->useMod); - printf("hasMod = %u\n", p->hasMod); - printf("nJPRef = %u\n", p->nJPRef); - printf("iSubst = %u\n", p->iSubst); - printf("iHold = %u\n", p->iHold); - jsonDebugPrintNodeEntries(p->aNode, p->nNode); - jsonReturnJson(p, p->aNode, ctx, 1); -} - -/* -** The json_test1(JSON) function return true (1) if the input is JSON -** text generated by another json function. It returns (0) if the input -** is not known to be JSON.
-*/ -static void jsonTest1Func( - sqlite3_context *ctx, - int argc, - sqlite3_value **argv -){ - UNUSED_PARAMETER(argc); - sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==JSON_SUBTYPE); + if( argc==1 ){ + jsonDebugPrintBlob(p, 0, p->nBlob, 0, &out); + sqlite3_result_text64(ctx, out.zText, out.nChar, SQLITE_DYNAMIC, SQLITE_UTF8); + }else{ + jsonShowParse(p); + } + jsonParseFree(p); } #endif /* SQLITE_DEBUG */ @@ -204974,7 +206743,7 @@ static void jsonTest1Func( ****************************************************************************/ /* -** Implementation of the json_QUOTE(VALUE) function. Return a JSON value +** Implementation of the json_quote(VALUE) function. Return a JSON value ** corresponding to the SQL value input. Mostly this means putting ** double-quotes around strings and returning the unquoted string "null" ** when given a NULL input. @@ -204987,9 +206756,9 @@ static void jsonQuoteFunc( JsonString jx; UNUSED_PARAMETER(argc); - jsonInit(&jx, ctx); - jsonAppendValue(&jx, argv[0]); - jsonResult(&jx); + jsonStringInit(&jx, ctx); + jsonAppendSqlValue(&jx, argv[0]); + jsonReturnString(&jx, 0, 0); sqlite3_result_subtype(ctx, JSON_SUBTYPE); } @@ -205006,18 +206775,17 @@ static void jsonArrayFunc( int i; JsonString jx; - jsonInit(&jx, ctx); + jsonStringInit(&jx, ctx); jsonAppendChar(&jx, '['); for(i=0; inNode ); if( argc==2 ){ const char *zPath = (const char*)sqlite3_value_text(argv[1]); - pNode = jsonLookup(p, zPath, 0, ctx); - }else{ - pNode = p->aNode; - } - if( pNode==0 ){ - return; - } - if( pNode->eType==JSON_ARRAY ){ - while( 1 /*exit-by-break*/ ){ - i = 1; - while( i<=pNode->n ){ - if( (pNode[i].jnFlags & JNODE_REMOVE)==0 ) n++; - i += jsonNodeSize(&pNode[i]); + if( zPath==0 ){ + jsonParseFree(p); + return; + } + i = jsonLookupStep(p, 0, zPath[0]=='$' ? zPath+1 : "@", 0); + if( JSON_LOOKUP_ISERROR(i) ){ + if( i==JSON_LOOKUP_NOTFOUND ){ + /* no-op */ + }else if( i==JSON_LOOKUP_PATHERROR ){ + jsonBadPathError(ctx, zPath); + }else{ + sqlite3_result_error(ctx, "malformed JSON", -1); } - if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; - if( p->useMod==0 ) break; - assert( pNode->eU==2 ); - pNode = &p->aNode[pNode->u.iAppend]; + eErr = 1; + i = 0; } + }else{ + i = 0; + } + if( (p->aBlob[i] & 0x0f)==JSONB_ARRAY ){ + cnt = jsonbArrayCount(p, i); } - sqlite3_result_int64(ctx, n); + if( !eErr ) sqlite3_result_int64(ctx, cnt); + jsonParseFree(p); } -/* -** Bit values for the flags passed into jsonExtractFunc() or -** jsonSetFunc() via the user-data value. -*/ -#define JSON_JSON 0x01 /* Result is always JSON */ -#define JSON_SQL 0x02 /* Result is always SQL */ -#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */ -#define JSON_ISSET 0x04 /* json_set(), not json_insert() */ +/* True if the string is all digits */ +static int jsonAllDigits(const char *z, int n){ + int i; + for(i=0; i2 ){ + jsonAppendChar(&jx, '['); + } + for(i=1; i and ->> operators accept abbreviated PATH arguments. This - ** is mostly for compatibility with PostgreSQL, but also for - ** convenience. - ** - ** NUMBER ==> $[NUMBER] // PG compatible - ** LABEL ==> $.LABEL // PG compatible - ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience - */ - jsonInit(&jx, ctx); - if( sqlite3Isdigit(zPath[0]) ){ - jsonAppendRawNZ(&jx, "$[", 2); - jsonAppendRaw(&jx, zPath, (int)strlen(zPath)); - jsonAppendRawNZ(&jx, "]", 2); - }else{ - jsonAppendRawNZ(&jx, "$.", 1 + (zPath[0]!='[')); - jsonAppendRaw(&jx, zPath, (int)strlen(zPath)); - jsonAppendChar(&jx, 0); - } - pNode = jx.bErr ? 
0 : jsonLookup(p, jx.zBuf, 0, ctx); - jsonReset(&jx); + const char *zPath = (const char*)sqlite3_value_text(argv[i]); + int nPath; + u32 j; + if( zPath==0 ) goto json_extract_error; + nPath = sqlite3Strlen30(zPath); + if( zPath[0]=='$' ){ + j = jsonLookupStep(p, 0, zPath+1, 0); + }else if( (flags & JSON_ABPATH) ){ + /* The -> and ->> operators accept abbreviated PATH arguments. This + ** is mostly for compatibility with PostgreSQL, but also for + ** convenience. + ** + ** NUMBER ==> $[NUMBER] // PG compatible + ** LABEL ==> $.LABEL // PG compatible + ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience + */ + jsonStringInit(&jx, ctx); + if( jsonAllDigits(zPath, nPath) ){ + jsonAppendRawNZ(&jx, "[", 1); + jsonAppendRaw(&jx, zPath, nPath); + jsonAppendRawNZ(&jx, "]", 2); + }else if( jsonAllAlphanum(zPath, nPath) ){ + jsonAppendRawNZ(&jx, ".", 1); + jsonAppendRaw(&jx, zPath, nPath); + }else if( zPath[0]=='[' && nPath>=3 && zPath[nPath-1]==']' ){ + jsonAppendRaw(&jx, zPath, nPath); }else{ + jsonAppendRawNZ(&jx, ".\"", 2); + jsonAppendRaw(&jx, zPath, nPath); + jsonAppendRawNZ(&jx, "\"", 1); } + jsonStringTerminate(&jx); + j = jsonLookupStep(p, 0, jx.zBuf, 0); + jsonStringReset(&jx); + }else{ + jsonBadPathError(ctx, zPath); + goto json_extract_error; + } + if( j<p->nBlob ){ + if( argc==2 ){ if( flags & JSON_JSON ){ - jsonReturnJson(p, pNode, ctx, 0); + jsonStringInit(&jx, ctx); + jsonTranslateBlobToText(p, j, &jx); + jsonReturnString(&jx, 0, 0); + jsonStringReset(&jx); + assert( (flags & JSON_BLOB)==0 ); + sqlite3_result_subtype(ctx, JSON_SUBTYPE); }else{ - jsonReturn(p, pNode, ctx); - sqlite3_result_subtype(ctx, 0); + jsonReturnFromBlob(p, j, ctx, 0); + if( (flags & (JSON_SQL|JSON_BLOB))==0 + && (p->aBlob[j]&0x0f)>=JSONB_ARRAY + ){ + sqlite3_result_subtype(ctx, JSON_SUBTYPE); + } } + }else{ + jsonAppendSeparator(&jx); + jsonTranslateBlobToText(p, j, &jx); } - }else{ - pNode = jsonLookup(p, zPath, 0, ctx); - if( p->nErr==0 && pNode ) jsonReturn(p, pNode, ctx); - } - }else{ - /* Two or more PATH arguments results in a JSON array with each - ** element of the array being the value selected by one of the PATHs */ - int i; - jsonInit(&jx, ctx); - jsonAppendChar(&jx, '['); - for(i=1; i<argc; i++){ - const char *zPath = (const char*)sqlite3_value_text(argv[i]); - pNode = jsonLookup(p, zPath, 0, ctx); - if( p->nErr ) break; - jsonAppendSeparator(&jx); - if( pNode ){ - jsonRenderNode(p, pNode, &jx); + }else if( j==JSON_LOOKUP_NOTFOUND ){ + if( argc==2 ){ + goto json_extract_error; /* Return NULL if not found */ }else{ + jsonAppendSeparator(&jx); jsonAppendRawNZ(&jx, "null", 4); } + }else if( j==JSON_LOOKUP_ERROR ){ + sqlite3_result_error(ctx, "malformed JSON", -1); + goto json_extract_error; + }else{ + jsonBadPathError(ctx, zPath); + goto json_extract_error; } - if( i==argc ){ - jsonAppendChar(&jx, ']'); - jsonResult(&jx); + } + if( argc>2 ){ + jsonAppendChar(&jx, ']'); + jsonReturnString(&jx, 0, 0); + if( (flags & JSON_BLOB)==0 ){ sqlite3_result_subtype(ctx, JSON_SUBTYPE); } - jsonReset(&jx); } +json_extract_error: + jsonStringReset(&jx); + jsonParseFree(p); + return; } -/* This is the RFC 7396 MergePatch algorithm.
-*/ -static JsonNode *jsonMergePatch( - JsonParse *pParse, /* The JSON parser that contains the TARGET */ - u32 iTarget, /* Node of the TARGET in pParse */ - JsonNode *pPatch /* The PATCH */ -){ - u32 i, j; - u32 iRoot; - JsonNode *pTarget; - if( pPatch->eType!=JSON_OBJECT ){ - return pPatch; - } - assert( iTarget<pParse->nNode ); - pTarget = &pParse->aNode[iTarget]; - assert( (pPatch->jnFlags & JNODE_APPEND)==0 ); - if( pTarget->eType!=JSON_OBJECT ){ - jsonRemoveAllNulls(pPatch); - return pPatch; - } - iRoot = iTarget; - for(i=1; i<pPatch->n; i += jsonNodeSize(&pPatch[i+1])+1){ - u32 nKey; - const char *zKey; - assert( pPatch[i].eType==JSON_STRING ); - assert( pPatch[i].jnFlags & JNODE_LABEL ); - assert( pPatch[i].eU==1 ); - nKey = pPatch[i].n; - zKey = pPatch[i].u.zJContent; - for(j=1; j<pTarget->n; j += jsonNodeSize(&pTarget[j+1])+1 ){ - assert( pTarget[j].eType==JSON_STRING ); - assert( pTarget[j].jnFlags & JNODE_LABEL ); - if( jsonSameLabel(&pPatch[i], &pTarget[j]) ){ - if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_REPLACE) ) break; - if( pPatch[i+1].eType==JSON_NULL ){ - pTarget[j+1].jnFlags |= JNODE_REMOVE; - }else{ - JsonNode *pNew = jsonMergePatch(pParse, iTarget+j+1, &pPatch[i+1]); - if( pNew==0 ) return 0; - if( pNew!=&pParse->aNode[iTarget+j+1] ){ - jsonParseAddSubstNode(pParse, iTarget+j+1); - jsonParseAddNodeArray(pParse, pNew, jsonNodeSize(pNew)); - } - pTarget = &pParse->aNode[iTarget]; - } - break; +/* +** Return codes for jsonMergePatch() +*/ +#define JSON_MERGE_OK 0 /* Success */ +#define JSON_MERGE_BADTARGET 1 /* Malformed TARGET blob */ +#define JSON_MERGE_BADPATCH 2 /* Malformed PATCH blob */ +#define JSON_MERGE_OOM 3 /* Out-of-memory condition */ + +/* +** RFC-7396 MergePatch for two JSONB blobs. +** +** pTarget is the target. pPatch is the patch. The target is updated +** in place. The patch is read-only.
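+**
+** For example, json_patch('{"a":1,"b":2}', '{"b":null,"c":3}')
+** removes "b" (its patch value is null), leaves "a" untouched, and
+** inserts "c", yielding {"a":1,"c":3}.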
+** +** The original RFC-7396 algorithm is this: +** +** define MergePatch(Target, Patch): +** if Patch is an Object: +** if Target is not an Object: +** Target = {} # Ignore the contents and set it to an empty Object +** for each Name/Value pair in Patch: +** if Value is null: +** if Name exists in Target: +** remove the Name/Value pair from Target +** else: +** Target[Name] = MergePatch(Target[Name], Value) +** return Target +** else: +** return Patch +** +** Here is an equivalent algorithm restructured to show the actual +** implementation: +** +** 01 define MergePatch(Target, Patch): +** 02 if Patch is not an Object: +** 03 return Patch +** 04 else: // if Patch is an Object +** 05 if Target is not an Object: +** 06 Target = {} +** 07 for each Name/Value pair in Patch: +** 08 if Name exists in Target: +** 09 if Value is null: +** 10 remove the Name/Value pair from Target +** 11 else +** 12 Target[name] = MergePatch(Target[Name], Value) +** 13 else if Value is not NULL: +** 14 if Value is not an Object: +** 15 Target[name] = Value +** 16 else: +** 17 Target[name] = MergePatch('{}',value) +** 18 return Target +** | +** ^---- Line numbers referenced in comments in the implementation +*/ +static int jsonMergePatch( + JsonParse *pTarget, /* The JSON parser that contains the TARGET */ + u32 iTarget, /* Index of TARGET in pTarget->aBlob[] */ + const JsonParse *pPatch, /* The PATCH */ + u32 iPatch /* Index of PATCH in pPatch->aBlob[] */ +){ + u8 x; /* Type of a single node */ + u32 n, sz=0; /* Return values from jsonbPayloadSize() */ + u32 iTCursor; /* Cursor position while scanning the target object */ + u32 iTStart; /* First label in the target object */ + u32 iTEndBE; /* Original first byte past end of target, before edit */ + u32 iTEnd; /* Current first byte past end of target */ + u8 eTLabel; /* Node type of the target label */ + u32 iTLabel = 0; /* Index of the label */ + u32 nTLabel = 0; /* Header size in bytes for the target label */ + u32 szTLabel = 0; /* Size of the target label payload */ + u32 iTValue = 0; /* Index of the target value */ + u32 nTValue = 0; /* Header size of the target value */ + u32 szTValue = 0; /* Payload size for the target value */ + + u32 iPCursor; /* Cursor position while scanning the patch */ + u32 iPEnd; /* First byte past the end of the patch */ + u8 ePLabel; /* Node type of the patch label */ + u32 iPLabel; /* Start of patch label */ + u32 nPLabel; /* Size of header on the patch label */ + u32 szPLabel; /* Payload size of the patch label */ + u32 iPValue; /* Start of patch value */ + u32 nPValue; /* Header size for the patch value */ + u32 szPValue; /* Payload size of the patch value */ + + assert( iTarget>=0 && iTarget<pTarget->nBlob ); + assert( iPatch>=0 && iPatch<pPatch->nBlob ); + x = pPatch->aBlob[iPatch] & 0x0f; + if( x!=JSONB_OBJECT ){ /* Algorithm line 02 */ + u32 szPatch; /* Total size of the patch, header+payload */ + u32 szTarget; /* Total size of the target, header+payload */ + n = jsonbPayloadSize(pPatch, iPatch, &sz); + szPatch = n+sz; + sz = 0; + n = jsonbPayloadSize(pTarget, iTarget, &sz); + szTarget = n+sz; + jsonBlobEdit(pTarget, iTarget, szTarget, pPatch->aBlob+iPatch, szPatch); + return pTarget->oom ? 
JSON_MERGE_OOM : JSON_MERGE_OK; /* Line 03 */ + } + x = pTarget->aBlob[iTarget] & 0x0f; + if( x!=JSONB_OBJECT ){ /* Algorithm line 05 */ + n = jsonbPayloadSize(pTarget, iTarget, &sz); + jsonBlobEdit(pTarget, iTarget+n, sz, 0, 0); + x = pTarget->aBlob[iTarget]; + pTarget->aBlob[iTarget] = (x & 0xf0) | JSONB_OBJECT; + } + n = jsonbPayloadSize(pPatch, iPatch, &sz); + if( NEVER(n==0) ) return JSON_MERGE_BADPATCH; + iPCursor = iPatch+n; + iPEnd = iPCursor+sz; + n = jsonbPayloadSize(pTarget, iTarget, &sz); + if( NEVER(n==0) ) return JSON_MERGE_BADTARGET; + iTStart = iTarget+n; + iTEndBE = iTStart+sz; + + while( iPCursor<iPEnd ){ + iPLabel = iPCursor; + ePLabel = pPatch->aBlob[iPCursor] & 0x0f; + if( ePLabel<JSONB_TEXT || ePLabel>JSONB_TEXTRAW ){ + return JSON_MERGE_BADPATCH; + } + nPLabel = jsonbPayloadSize(pPatch, iPCursor, &szPLabel); + if( nPLabel==0 ) return JSON_MERGE_BADPATCH; + iPValue = iPCursor + nPLabel + szPLabel; + if( iPValue>=iPEnd ) return JSON_MERGE_BADPATCH; + nPValue = jsonbPayloadSize(pPatch, iPValue, &szPValue); + if( nPValue==0 ) return JSON_MERGE_BADPATCH; + iPCursor = iPValue + nPValue + szPValue; + if( iPCursor>iPEnd ) return JSON_MERGE_BADPATCH; + + iTCursor = iTStart; + iTEnd = iTEndBE + pTarget->delta; + while( iTCursor<iTEnd ){ + int isEqual; + iTLabel = iTCursor; + eTLabel = pTarget->aBlob[iTCursor] & 0x0f; + if( eTLabel<JSONB_TEXT || eTLabel>JSONB_TEXTRAW ){ + return JSON_MERGE_BADTARGET; + } + nTLabel = jsonbPayloadSize(pTarget, iTCursor, &szTLabel); + if( nTLabel==0 ) return JSON_MERGE_BADTARGET; + iTValue = iTLabel + nTLabel + szTLabel; + if( iTValue>=iTEnd ) return JSON_MERGE_BADTARGET; + nTValue = jsonbPayloadSize(pTarget, iTValue, &szTValue); + if( nTValue==0 ) return JSON_MERGE_BADTARGET; + if( iTValue + nTValue + szTValue > iTEnd ) return JSON_MERGE_BADTARGET; + isEqual = jsonLabelCompare( + (const char*)&pPatch->aBlob[iPLabel+nPLabel], + szPLabel, + (ePLabel==JSONB_TEXT || ePLabel==JSONB_TEXTRAW), + (const char*)&pTarget->aBlob[iTLabel+nTLabel], + szTLabel, + (eTLabel==JSONB_TEXT || eTLabel==JSONB_TEXTRAW)); + if( isEqual ) break; + iTCursor = iTValue + nTValue + szTValue; + } + x = pPatch->aBlob[iPValue] & 0x0f; + if( iTCursor<iTEnd ){ + if( x==0 ){ + jsonBlobEdit(pTarget, iTLabel, nTLabel+szTLabel+nTValue+szTValue, 0, 0); + if( NEVER(pTarget->oom) ) return JSON_MERGE_OOM; + }else{ + /* Algorithm line 12 */ + int rc, savedDelta = pTarget->delta; + pTarget->delta = 0; + rc = jsonMergePatch(pTarget, iTValue, pPatch, iPValue); + if( rc ) return rc; + pTarget->delta += savedDelta; + } + }else if( x>0 ){ /* Algorithm line 13 */ + /* No match and patch value is not NULL */ + u32 szNew = szPLabel+nPLabel; + if( (pPatch->aBlob[iPValue] & 0x0f)!=JSONB_OBJECT ){ /* Line 14 */ + jsonBlobEdit(pTarget, iTEnd, 0, 0, szPValue+nPValue+szNew); + if( pTarget->oom ) return JSON_MERGE_OOM; + memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew); + memcpy(&pTarget->aBlob[iTEnd+szNew], + &pPatch->aBlob[iPValue], szPValue+nPValue); + }else{ + int rc, savedDelta; + jsonBlobEdit(pTarget, iTEnd, 0, 0, szNew+1); + if( pTarget->oom ) return JSON_MERGE_OOM; + memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew); + pTarget->aBlob[iTEnd+szNew] = 0x00; + savedDelta = pTarget->delta; + pTarget->delta = 0; + rc = jsonMergePatch(pTarget, iTEnd+szNew,pPatch,iPValue); + if( rc ) return rc; + pTarget->delta += savedDelta; } } - if( j>=pTarget->n && pPatch[i+1].eType!=JSON_NULL ){ - int iStart; - JsonNode *pApnd; - u32 nApnd; - iStart = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); - jsonParseAddNode(pParse, JSON_STRING, nKey, zKey); - pApnd = &pPatch[i+1]; - if( pApnd->eType==JSON_OBJECT ) jsonRemoveAllNulls(pApnd); - nApnd = jsonNodeSize(pApnd); - jsonParseAddNodeArray(pParse, pApnd, jsonNodeSize(pApnd)); - if( pParse->oom ) return 0; - 
pParse->aNode[iStart].n = 1+nApnd; - pParse->aNode[iRoot].jnFlags |= JNODE_APPEND; - pParse->aNode[iRoot].u.iAppend = iStart; - VVA( pParse->aNode[iRoot].eU = 2 ); - iRoot = iStart; - pTarget = &pParse->aNode[iTarget]; - } } - return pTarget; + if( pTarget->delta ) jsonAfterEditSizeAdjust(pTarget, iTarget); + return pTarget->oom ? JSON_MERGE_OOM : JSON_MERGE_OK; } + /* ** Implementation of the json_mergepatch(JSON1,JSON2) function. Return a JSON ** object that is the result of running the RFC 7396 MergePatch() algorithm @@ -205253,28 +207183,27 @@ static void jsonPatchFunc( int argc, sqlite3_value **argv ){ - JsonParse *pX; /* The JSON that is being patched */ - JsonParse *pY; /* The patch */ - JsonNode *pResult; /* The result of the merge */ + JsonParse *pTarget; /* The TARGET */ + JsonParse *pPatch; /* The PATCH */ + int rc; /* Result code */ UNUSED_PARAMETER(argc); - pX = jsonParseCached(ctx, argv[0], ctx, 1); - if( pX==0 ) return; - assert( pX->hasMod==0 ); - pX->hasMod = 1; - pY = jsonParseCached(ctx, argv[1], ctx, 1); - if( pY==0 ) return; - pX->useMod = 1; - pY->useMod = 1; - pResult = jsonMergePatch(pX, 0, pY->aNode); - assert( pResult!=0 || pX->oom ); - if( pResult && pX->oom==0 ){ - jsonDebugPrintParse(pX); - jsonDebugPrintNode(pResult); - jsonReturnJson(pX, pResult, ctx, 0); - }else{ - sqlite3_result_error_nomem(ctx); + assert( argc==2 ); + pTarget = jsonParseFuncArg(ctx, argv[0], JSON_EDITABLE); + if( pTarget==0 ) return; + pPatch = jsonParseFuncArg(ctx, argv[1], 0); + if( pPatch ){ + rc = jsonMergePatch(pTarget, 0, pPatch, 0); + if( rc==JSON_MERGE_OK ){ + jsonReturnParse(ctx, pTarget); + }else if( rc==JSON_MERGE_OOM ){ + sqlite3_result_error_nomem(ctx); + }else{ + sqlite3_result_error(ctx, "malformed JSON", -1); + } + jsonParseFree(pPatch); } + jsonParseFree(pTarget); } @@ -205298,23 +207227,23 @@ static void jsonObjectFunc( "of arguments", -1); return; } - jsonInit(&jx, ctx); + jsonStringInit(&jx, ctx); jsonAppendChar(&jx, '{'); for(i=0; i1); - if( pParse==0 ) return; - for(i=1; i<(u32)argc; i++){ + p = jsonParseFuncArg(ctx, argv[0], argc>1 ? JSON_EDITABLE : 0); + if( p==0 ) return; + for(i=1; inErr ) goto remove_done; - if( pNode ){ - pNode->jnFlags |= JNODE_REMOVE; - pParse->hasMod = 1; - pParse->useMod = 1; - } - } - if( (pParse->aNode[0].jnFlags & JNODE_REMOVE)==0 ){ - jsonReturnJson(pParse, pParse->aNode, ctx, 1); - } -remove_done: - jsonDebugPrintParse(p); -} - -/* -** Substitute the value at iNode with the pValue parameter. 
-*/ -static void jsonReplaceNode( - sqlite3_context *pCtx, - JsonParse *p, - int iNode, - sqlite3_value *pValue -){ - int idx = jsonParseAddSubstNode(p, iNode); - if( idx<=0 ){ - assert( p->oom ); - return; - } - switch( sqlite3_value_type(pValue) ){ - case SQLITE_NULL: { - jsonParseAddNode(p, JSON_NULL, 0, 0); - break; - } - case SQLITE_FLOAT: { - char *z = sqlite3_mprintf("%!0.15g", sqlite3_value_double(pValue)); - int n; - if( z==0 ){ - p->oom = 1; - break; - } - n = sqlite3Strlen30(z); - jsonParseAddNode(p, JSON_REAL, n, z); - jsonParseAddCleanup(p, sqlite3_free, z); - break; - } - case SQLITE_INTEGER: { - char *z = sqlite3_mprintf("%lld", sqlite3_value_int64(pValue)); - int n; - if( z==0 ){ - p->oom = 1; - break; - } - n = sqlite3Strlen30(z); - jsonParseAddNode(p, JSON_INT, n, z); - jsonParseAddCleanup(p, sqlite3_free, z); - - break; - } - case SQLITE_TEXT: { - const char *z = (const char*)sqlite3_value_text(pValue); - u32 n = (u32)sqlite3_value_bytes(pValue); - if( z==0 ){ - p->oom = 1; - break; - } - if( sqlite3_value_subtype(pValue)!=JSON_SUBTYPE ){ - char *zCopy = sqlite3_malloc64( n+1 ); - int k; - if( zCopy ){ - memcpy(zCopy, z, n); - zCopy[n] = 0; - jsonParseAddCleanup(p, sqlite3_free, zCopy); - }else{ - p->oom = 1; - sqlite3_result_error_nomem(pCtx); - } - k = jsonParseAddNode(p, JSON_STRING, n, zCopy); - assert( k>0 || p->oom ); - if( p->oom==0 ) p->aNode[k].jnFlags |= JNODE_RAW; + if( zPath==0 ){ + goto json_remove_done; + } + if( zPath[0]!='$' ){ + goto json_remove_patherror; + } + if( zPath[1]==0 ){ + /* json_remove(j,'$') returns NULL */ + goto json_remove_done; + } + p->eEdit = JEDIT_DEL; + p->delta = 0; + rc = jsonLookupStep(p, 0, zPath+1, 0); + if( JSON_LOOKUP_ISERROR(rc) ){ + if( rc==JSON_LOOKUP_NOTFOUND ){ + continue; /* No-op */ + }else if( rc==JSON_LOOKUP_PATHERROR ){ + jsonBadPathError(ctx, zPath); }else{ - JsonParse *pPatch = jsonParseCached(pCtx, pValue, pCtx, 1); - if( pPatch==0 ){ - p->oom = 1; - break; - } - jsonParseAddNodeArray(p, pPatch->aNode, pPatch->nNode); - /* The nodes copied out of pPatch and into p likely contain - ** u.zJContent pointers into pPatch->zJson. So preserve the - ** content of pPatch until p is destroyed. 
*/ - assert( pPatch->nJPRef>=1 ); - pPatch->nJPRef++; - jsonParseAddCleanup(p, (void(*)(void*))jsonParseFree, pPatch); + sqlite3_result_error(ctx, "malformed JSON", -1); } - break; - } - default: { - jsonParseAddNode(p, JSON_NULL, 0, 0); - sqlite3_result_error(pCtx, "JSON cannot hold BLOB values", -1); - p->nErr++; - break; + goto json_remove_done; } } + jsonReturnParse(ctx, p); + jsonParseFree(p); + return; + +json_remove_patherror: + jsonBadPathError(ctx, zPath); + +json_remove_done: + jsonParseFree(p); + return; } /* @@ -205457,32 +207316,12 @@ static void jsonReplaceFunc( int argc, sqlite3_value **argv ){ - JsonParse *pParse; /* The parse */ - JsonNode *pNode; - const char *zPath; - u32 i; - if( argc<1 ) return; if( (argc&1)==0 ) { jsonWrongNumArgs(ctx, "replace"); return; } - pParse = jsonParseCached(ctx, argv[0], ctx, argc>1); - if( pParse==0 ) return; - pParse->nJPRef++; - for(i=1; i<(u32)argc; i+=2){ - zPath = (const char*)sqlite3_value_text(argv[i]); - pParse->useMod = 1; - pNode = jsonLookup(pParse, zPath, 0, ctx); - if( pParse->nErr ) goto replace_err; - if( pNode ){ - jsonReplaceNode(ctx, pParse, (u32)(pNode - pParse->aNode), argv[i+1]); - } - } - jsonReturnJson(pParse, pParse->aNode, ctx, 1); -replace_err: - jsonDebugPrintParse(pParse); - jsonParseFree(pParse); + jsonInsertIntoBlob(ctx, argc, argv, JEDIT_REPL); } @@ -205503,39 +207342,16 @@ static void jsonSetFunc( int argc, sqlite3_value **argv ){ - JsonParse *pParse; /* The parse */ - JsonNode *pNode; - const char *zPath; - u32 i; - int bApnd; - int bIsSet = sqlite3_user_data(ctx)!=0; + + int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + int bIsSet = (flags&JSON_ISSET)!=0; if( argc<1 ) return; if( (argc&1)==0 ) { jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert"); return; } - pParse = jsonParseCached(ctx, argv[0], ctx, argc>1); - if( pParse==0 ) return; - pParse->nJPRef++; - for(i=1; i<(u32)argc; i+=2){ - zPath = (const char*)sqlite3_value_text(argv[i]); - bApnd = 0; - pParse->useMod = 1; - pNode = jsonLookup(pParse, zPath, &bApnd, ctx); - if( pParse->oom ){ - sqlite3_result_error_nomem(ctx); - goto jsonSetDone; - }else if( pParse->nErr ){ - goto jsonSetDone; - }else if( pNode && (bApnd || bIsSet) ){ - jsonReplaceNode(ctx, pParse, (u32)(pNode - pParse->aNode), argv[i+1]); - } - } - jsonDebugPrintParse(pParse); - jsonReturnJson(pParse, pParse->aNode, ctx, 1); -jsonSetDone: - jsonParseFree(pParse); + jsonInsertIntoBlob(ctx, argc, argv, bIsSet ? 
JEDIT_SET : JEDIT_INS); } /* @@ -205551,27 +207367,93 @@ static void jsonTypeFunc( sqlite3_value **argv ){ JsonParse *p; /* The parse */ - const char *zPath; - JsonNode *pNode; + const char *zPath = 0; + u32 i; - p = jsonParseCached(ctx, argv[0], ctx, 0); + p = jsonParseFuncArg(ctx, argv[0], 0); if( p==0 ) return; if( argc==2 ){ zPath = (const char*)sqlite3_value_text(argv[1]); - pNode = jsonLookup(p, zPath, 0, ctx); + if( zPath==0 ) goto json_type_done; + if( zPath[0]!='$' ){ + jsonBadPathError(ctx, zPath); + goto json_type_done; + } + i = jsonLookupStep(p, 0, zPath+1, 0); + if( JSON_LOOKUP_ISERROR(i) ){ + if( i==JSON_LOOKUP_NOTFOUND ){ + /* no-op */ + }else if( i==JSON_LOOKUP_PATHERROR ){ + jsonBadPathError(ctx, zPath); + }else{ + sqlite3_result_error(ctx, "malformed JSON", -1); + } + goto json_type_done; + } }else{ - pNode = p->aNode; - } - if( pNode ){ - sqlite3_result_text(ctx, jsonType[pNode->eType], -1, SQLITE_STATIC); + i = 0; } + sqlite3_result_text(ctx, jsonbType[p->aBlob[i]&0x0f], -1, SQLITE_STATIC); +json_type_done: + jsonParseFree(p); } /* ** json_valid(JSON) -** -** Return 1 if JSON is a well-formed canonical JSON string according -** to RFC-7159. Return 0 otherwise. +** json_valid(JSON, FLAGS) +** +** Check the JSON argument to see if it is well-formed. The FLAGS argument +** encodes the various constraints on what is meant by "well-formed": +** +** 0x01 Canonical RFC-8259 JSON text +** 0x02 JSON text with optional JSON-5 extensions +** 0x04 Superficially appears to be JSONB +** 0x08 Strictly well-formed JSONB +** +** If the FLAGS argument is omitted, it defaults to 1. Useful values for +** FLAGS include: +** +** 1 Strict canonical JSON text +** 2 JSON text perhaps with JSON-5 extensions +** 4 Superficially appears to be JSONB +** 5 Canonical JSON text or superficial JSONB +** 6 JSON-5 text or superficial JSONB +** 8 Strict JSONB +** 9 Canonical JSON text or strict JSONB +** 10 JSON-5 text or strict JSONB +** +** Other flag combinations are redundant. For example, every canonical +** JSON text is also well-formed JSON-5 text, so FLAG values 2 and 3 +** are the same. Similarly, any input that passes a strict JSONB validation +** will also pass the superficial validation so 12 through 15 are the same +** as 8 through 11 respectively. +** +** This routine runs in linear time to validate text and when doing strict +** JSONB validation. Superficial JSONB validation is constant time, +** assuming the BLOB is already in memory. The performance advantage +** of superficial JSONB validation is why that option is provided. +** Application developers can choose to do fast superficial validation or +** slower strict validation, according to their specific needs. +** +** Only the lower four bits of the FLAGS argument are currently used. +** Higher bits are reserved for future expansion. To facilitate +** compatibility, the current implementation raises an error if any bit +** in FLAGS is set other than the lower four bits. +** +** The original circa 2015 implementation of the JSON routines in +** SQLite only supported canonical RFC-8259 JSON text and the json_valid() +** function only accepted one argument. That is why the default value +** for the FLAGS argument is 1, since FLAGS=1 causes this routine to only +** recognize canonical RFC-8259 JSON text as valid. The extra FLAGS +** argument was added when the JSON routines were extended to support +** JSON5-like extensions and binary JSONB stored in BLOBs. 
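+**
+** For example, json_valid('{a:1}') returns 0 because unquoted object
+** labels are not canonical RFC-8259 JSON, but json_valid('{a:1}',2)
+** returns 1 because JSON-5 permits them. Likewise (assuming the
+** jsonb() function) json_valid(jsonb('{"a":1}'),4) returns 1, since
+** the argument superficially appears to be JSONB.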
+** +** Return Values: +** +** * Raise an error if FLAGS is outside the range of 1 to 15. +** * Return NULL if the input is NULL +** * Return 1 if the input is well-formed. +** * Return 0 if the input is not well-formed. */ static void jsonValidFunc( sqlite3_context *ctx, @@ -205579,79 +207461,128 @@ static void jsonValidFunc( sqlite3_value **argv ){ JsonParse *p; /* The parse */ - UNUSED_PARAMETER(argc); - if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ + u8 flags = 1; + u8 res = 0; + if( argc==2 ){ + i64 f = sqlite3_value_int64(argv[1]); + if( f<1 || f>15 ){ + sqlite3_result_error(ctx, "FLAGS parameter to json_valid() must be" + " between 1 and 15", -1); + return; + } + flags = f & 0x0f; + } + switch( sqlite3_value_type(argv[0]) ){ + case SQLITE_NULL: { #ifdef SQLITE_LEGACY_JSON_VALID - /* Incorrect legacy behavior was to return FALSE for a NULL input */ - sqlite3_result_int(ctx, 0); + /* Incorrect legacy behavior was to return FALSE for a NULL input */ + sqlite3_result_int(ctx, 0); #endif - return; - } - p = jsonParseCached(ctx, argv[0], 0, 0); - if( p==0 || p->oom ){ - sqlite3_result_error_nomem(ctx); - sqlite3_free(p); - }else{ - sqlite3_result_int(ctx, p->nErr==0 && (p->hasNonstd==0 || p->useMod)); - if( p->nErr ) jsonParseFree(p); + return; + } + case SQLITE_BLOB: { + if( jsonFuncArgMightBeBinary(argv[0]) ){ + if( flags & 0x04 ){ + /* Superficial checking only - accomplished by the + ** jsonFuncArgMightBeBinary() call above. */ + res = 1; + }else if( flags & 0x08 ){ + /* Strict checking. Check by translating BLOB->TEXT->BLOB. If + ** no errors occur, call that a "strict check". */ + JsonParse px; + u32 iErr; + memset(&px, 0, sizeof(px)); + px.aBlob = (u8*)sqlite3_value_blob(argv[0]); + px.nBlob = sqlite3_value_bytes(argv[0]); + iErr = jsonbValidityCheck(&px, 0, px.nBlob, 1); + res = iErr==0; + } + break; + } + /* Fall through into interpreting the input as text. See note + ** above at tag-20240123-a. */ + /* no break */ deliberate_fall_through + } + default: { + JsonParse px; + if( (flags & 0x3)==0 ) break; + memset(&px, 0, sizeof(px)); + + p = jsonParseFuncArg(ctx, argv[0], JSON_KEEPERROR); + if( p ){ + if( p->oom ){ + sqlite3_result_error_nomem(ctx); + }else if( p->nErr ){ + /* no-op */ + }else if( (flags & 0x02)!=0 || p->hasNonstd==0 ){ + res = 1; + } + jsonParseFree(p); + }else{ + sqlite3_result_error_nomem(ctx); + } + break; + } } + sqlite3_result_int(ctx, res); } /* ** json_error_position(JSON) ** -** If the argument is not an interpretable JSON string, then return the 1-based -** character position at which the parser first recognized that the input -** was in error. The left-most character is 1. If the string is valid -** JSON, then return 0. -** -** Note that json_valid() is only true for strictly conforming canonical JSON. -** But this routine returns zero if the input contains extension. Thus: +** If the argument is NULL, return NULL ** -** (1) If the input X is strictly conforming canonical JSON: +** If the argument is BLOB, do a full validity check and return non-zero +** if the check fails. The return value is the approximate 1-based offset +** to the byte of the element that contains the first error. 
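+**
+** For example, json_error_position('{"a" 1}') returns a small positive
+** character position, pointing near the unexpected '1', while
+** json_error_position('{"a":1}') returns 0.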
** -** json_valid(X) returns true -** json_error_position(X) returns 0 -** -** (2) If the input X is JSON but it includes extension (such as JSON5) that -** are not part of RFC-8259: -** -** json_valid(X) returns false -** json_error_position(X) return 0 -** -** (3) If the input X cannot be interpreted as JSON even taking extensions -** into account: -** -** json_valid(X) return false -** json_error_position(X) returns 1 or more +** Otherwise interpret the argument is TEXT (even if it is numeric) and +** return the 1-based character position for where the parser first recognized +** that the input was not valid JSON, or return 0 if the input text looks +** ok. JSON-5 extensions are accepted. */ static void jsonErrorFunc( sqlite3_context *ctx, int argc, sqlite3_value **argv ){ - JsonParse *p; /* The parse */ + i64 iErrPos = 0; /* Error position to be returned */ + JsonParse s; + + assert( argc==1 ); UNUSED_PARAMETER(argc); - if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; - p = jsonParseCached(ctx, argv[0], 0, 0); - if( p==0 || p->oom ){ + memset(&s, 0, sizeof(s)); + s.db = sqlite3_context_db_handle(ctx); + if( jsonFuncArgMightBeBinary(argv[0]) ){ + s.aBlob = (u8*)sqlite3_value_blob(argv[0]); + s.nBlob = sqlite3_value_bytes(argv[0]); + iErrPos = (i64)jsonbValidityCheck(&s, 0, s.nBlob, 1); + }else{ + s.zJson = (char*)sqlite3_value_text(argv[0]); + if( s.zJson==0 ) return; /* NULL input or OOM */ + s.nJson = sqlite3_value_bytes(argv[0]); + if( jsonConvertTextToBlob(&s,0) ){ + if( s.oom ){ + iErrPos = -1; + }else{ + /* Convert byte-offset s.iErr into a character offset */ + u32 k; + assert( s.zJson!=0 ); /* Because s.oom is false */ + for(k=0; knErr==0 ){ - sqlite3_result_int(ctx, 0); }else{ - int n = 1; - u32 i; - const char *z = (const char*)sqlite3_value_text(argv[0]); - for(i=0; iiErr && ALWAYS(z[i]); i++){ - if( (z[i]&0xc0)!=0x80 ) n++; - } - sqlite3_result_int(ctx, n); - jsonParseFree(p); + sqlite3_result_int64(ctx, iErrPos); } } - /**************************************************************************** ** Aggregate SQL function implementations ****************************************************************************/ @@ -205670,24 +207601,34 @@ static void jsonArrayStep( pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr)); if( pStr ){ if( pStr->zBuf==0 ){ - jsonInit(pStr, ctx); + jsonStringInit(pStr, ctx); jsonAppendChar(pStr, '['); }else if( pStr->nUsed>1 ){ jsonAppendChar(pStr, ','); } pStr->pCtx = ctx; - jsonAppendValue(pStr, argv[0]); + jsonAppendSqlValue(pStr, argv[0]); } } static void jsonArrayCompute(sqlite3_context *ctx, int isFinal){ JsonString *pStr; pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0); if( pStr ){ + int flags; pStr->pCtx = ctx; jsonAppendChar(pStr, ']'); - if( pStr->bErr ){ - if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx); - assert( pStr->bStatic ); + flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + if( pStr->eErr ){ + jsonReturnString(pStr, 0, 0); + return; + }else if( flags & JSON_BLOB ){ + jsonReturnStringAsBlob(pStr); + if( isFinal ){ + if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf); + }else{ + jsonStringTrimOneChar(pStr); + } + return; }else if( isFinal ){ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, pStr->bStatic ? 
SQLITE_TRANSIENT : @@ -205695,7 +207636,7 @@ static void jsonArrayCompute(sqlite3_context *ctx, int isFinal){ pStr->bStatic = 1; }else{ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT); - pStr->nUsed--; + jsonStringTrimOneChar(pStr); } }else{ sqlite3_result_text(ctx, "[]", 2, SQLITE_STATIC); @@ -205776,27 +207717,38 @@ static void jsonObjectStep( pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr)); if( pStr ){ if( pStr->zBuf==0 ){ - jsonInit(pStr, ctx); + jsonStringInit(pStr, ctx); jsonAppendChar(pStr, '{'); }else if( pStr->nUsed>1 ){ jsonAppendChar(pStr, ','); } pStr->pCtx = ctx; z = (const char*)sqlite3_value_text(argv[0]); - n = (u32)sqlite3_value_bytes(argv[0]); + n = sqlite3Strlen30(z); jsonAppendString(pStr, z, n); jsonAppendChar(pStr, ':'); - jsonAppendValue(pStr, argv[1]); + jsonAppendSqlValue(pStr, argv[1]); } } static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){ JsonString *pStr; pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0); if( pStr ){ + int flags; jsonAppendChar(pStr, '}'); - if( pStr->bErr ){ - if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx); - assert( pStr->bStatic ); + pStr->pCtx = ctx; + flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + if( pStr->eErr ){ + jsonReturnString(pStr, 0, 0); + return; + }else if( flags & JSON_BLOB ){ + jsonReturnStringAsBlob(pStr); + if( isFinal ){ + if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf); + }else{ + jsonStringTrimOneChar(pStr); + } + return; }else if( isFinal ){ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, pStr->bStatic ? SQLITE_TRANSIENT : @@ -205804,7 +207756,7 @@ static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){ pStr->bStatic = 1; }else{ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT); - pStr->nUsed--; + jsonStringTrimOneChar(pStr); } }else{ sqlite3_result_text(ctx, "{}", 2, SQLITE_STATIC); @@ -205824,19 +207776,37 @@ static void jsonObjectFinal(sqlite3_context *ctx){ /**************************************************************************** ** The json_each virtual table ****************************************************************************/ +typedef struct JsonParent JsonParent; +struct JsonParent { + u32 iHead; /* Start of object or array */ + u32 iValue; /* Start of the value */ + u32 iEnd; /* First byte past the end */ + u32 nPath; /* Length of path */ + i64 iKey; /* Key for JSONB_ARRAY */ +}; + typedef struct JsonEachCursor JsonEachCursor; struct JsonEachCursor { sqlite3_vtab_cursor base; /* Base class - must be first */ u32 iRowid; /* The rowid */ - u32 iBegin; /* The first node of the scan */ - u32 i; /* Index in sParse.aNode[] of current row */ + u32 i; /* Index in sParse.aBlob[] of current row */ u32 iEnd; /* EOF when i equals or exceeds this value */ - u8 eType; /* Type of top-level element */ + u32 nRoot; /* Size of the root path in bytes */ + u8 eType; /* Type of the container for element i */ u8 bRecursive; /* True for json_tree(). 
False for json_each() */ - char *zJson; /* Input JSON */ - char *zRoot; /* Path by which to filter zJson */ + u32 nParent; /* Current nesting depth */ + u32 nParentAlloc; /* Space allocated for aParent[] */ + JsonParent *aParent; /* Parent elements of i */ + sqlite3 *db; /* Database connection */ + JsonString path; /* Current path */ JsonParse sParse; /* Parse of the input JSON */ }; +typedef struct JsonEachConnection JsonEachConnection; +struct JsonEachConnection { + sqlite3_vtab base; /* Base class - must be first */ + sqlite3 *db; /* Database connection */ +}; + /* Constructor for the json_each virtual table */ static int jsonEachConnect( @@ -205846,7 +207816,7 @@ static int jsonEachConnect( sqlite3_vtab **ppVtab, char **pzErr ){ - sqlite3_vtab *pNew; + JsonEachConnection *pNew; int rc; /* Column numbers */ @@ -205872,28 +207842,32 @@ static int jsonEachConnect( "CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path," "json HIDDEN,root HIDDEN)"); if( rc==SQLITE_OK ){ - pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) ); + pNew = (JsonEachConnection*)sqlite3DbMallocZero(db, sizeof(*pNew)); + *ppVtab = (sqlite3_vtab*)pNew; if( pNew==0 ) return SQLITE_NOMEM; - memset(pNew, 0, sizeof(*pNew)); sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); + pNew->db = db; } return rc; } /* destructor for json_each virtual table */ static int jsonEachDisconnect(sqlite3_vtab *pVtab){ - sqlite3_free(pVtab); + JsonEachConnection *p = (JsonEachConnection*)pVtab; + sqlite3DbFree(p->db, pVtab); return SQLITE_OK; } /* constructor for a JsonEachCursor object for json_each(). */ static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ + JsonEachConnection *pVtab = (JsonEachConnection*)p; JsonEachCursor *pCur; UNUSED_PARAMETER(p); - pCur = sqlite3_malloc( sizeof(*pCur) ); + pCur = sqlite3DbMallocZero(pVtab->db, sizeof(*pCur)); if( pCur==0 ) return SQLITE_NOMEM; - memset(pCur, 0, sizeof(*pCur)); + pCur->db = pVtab->db; + jsonStringZero(&pCur->path); *ppCursor = &pCur->base; return SQLITE_OK; } @@ -205911,21 +207885,24 @@ static int jsonEachOpenTree(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ /* Reset a JsonEachCursor back to its original state. Free any memory ** held. */ static void jsonEachCursorReset(JsonEachCursor *p){ - sqlite3_free(p->zRoot); jsonParseReset(&p->sParse); + jsonStringReset(&p->path); + sqlite3DbFree(p->db, p->aParent); p->iRowid = 0; p->i = 0; + p->aParent = 0; + p->nParent = 0; + p->nParentAlloc = 0; p->iEnd = 0; p->eType = 0; - p->zJson = 0; - p->zRoot = 0; } /* Destructor for a jsonEachCursor object */ static int jsonEachClose(sqlite3_vtab_cursor *cur){ JsonEachCursor *p = (JsonEachCursor*)cur; jsonEachCursorReset(p); - sqlite3_free(cur); + + sqlite3DbFree(p->db, cur); return SQLITE_OK; } @@ -205936,200 +207913,230 @@ static int jsonEachEof(sqlite3_vtab_cursor *cur){ return p->i >= p->iEnd; } -/* Advance the cursor to the next element for json_tree() */ -static int jsonEachNext(sqlite3_vtab_cursor *cur){ - JsonEachCursor *p = (JsonEachCursor*)cur; - if( p->bRecursive ){ - if( p->sParse.aNode[p->i].jnFlags & JNODE_LABEL ) p->i++; - p->i++; - p->iRowid++; - if( p->iiEnd ){ - u32 iUp = p->sParse.aUp[p->i]; - JsonNode *pUp = &p->sParse.aNode[iUp]; - p->eType = pUp->eType; - if( pUp->eType==JSON_ARRAY ){ - assert( pUp->eU==0 || pUp->eU==3 ); - testcase( pUp->eU==3 ); - VVA( pUp->eU = 3 ); - if( iUp==p->i-1 ){ - pUp->u.iKey = 0; - }else{ - pUp->u.iKey++; +/* +** If the cursor is currently pointing at the label of a object entry, +** then return the index of the value. 
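The jsonSkipLabel()/jsonbPayloadSize() pattern used throughout this traversal decodes JSONB element headers: the low nibble of the first byte is the element type, and the high nibble is either the payload size (0 to 11) or an escape (12 to 15) selecting a 1/2/4/8-byte big-endian size field. The helper below is a hypothetical standalone restatement of that header rule, following the published JSONB layout rather than the diff's exact code; the name jsonb_header is illustrative.

```c
#include <stdint.h>
#include <stddef.h>

/* Hypothetical sketch of the JSONB header rule used by the traversal code.
** Writes the payload size to *pSz and returns the header size in bytes,
** or 0 if the buffer is too short to hold a complete header. */
static size_t jsonb_header(const uint8_t *a, size_t n, uint64_t *pSz){
  uint8_t hi;
  int i, nExtra;
  if( n<1 ) return 0;
  hi = a[0]>>4;                    /* upper nibble of the first byte */
  if( hi<=11 ){ *pSz = hi; return 1; }
  nExtra = 1<<(hi-12);             /* 12,13,14,15 -> 1,2,4,8 size bytes */
  if( n<(size_t)(1+nExtra) ) return 0;
  *pSz = 0;
  for(i=1; i<=nExtra; i++) *pSz = (*pSz<<8) | a[i];  /* big-endian */
  return (size_t)(1+nExtra);
}
```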
For all other cases, return the +** current pointer position, which is the value. +*/ +static int jsonSkipLabel(JsonEachCursor *p){ + if( p->eType==JSONB_OBJECT ){ + u32 sz = 0; + u32 n = jsonbPayloadSize(&p->sParse, p->i, &sz); + return p->i + n + sz; + }else{ + return p->i; + } +} + +/* +** Append the path name for the current element. +*/ +static void jsonAppendPathName(JsonEachCursor *p){ + assert( p->nParent>0 ); + assert( p->eType==JSONB_ARRAY || p->eType==JSONB_OBJECT ); + if( p->eType==JSONB_ARRAY ){ + jsonPrintf(30, &p->path, "[%lld]", p->aParent[p->nParent-1].iKey); + }else{ + u32 n, sz = 0, k, i; + const char *z; + int needQuote = 0; + n = jsonbPayloadSize(&p->sParse, p->i, &sz); + k = p->i + n; + z = (const char*)&p->sParse.aBlob[k]; + if( sz==0 || !sqlite3Isalpha(z[0]) ){ + needQuote = 1; + }else{ + for(i=0; ieType ){ - case JSON_ARRAY: { - p->i += jsonNodeSize(&p->sParse.aNode[p->i]); - p->iRowid++; - break; - } - case JSON_OBJECT: { - p->i += 1 + jsonNodeSize(&p->sParse.aNode[p->i+1]); - p->iRowid++; - break; - } - default: { - p->i = p->iEnd; - break; - } + if( needQuote ){ + jsonPrintf(sz+4,&p->path,".\"%.*s\"", sz, z); + }else{ + jsonPrintf(sz+2,&p->path,".%.*s", sz, z); } } - return SQLITE_OK; } -/* Append an object label to the JSON Path being constructed -** in pStr. -*/ -static void jsonAppendObjectPathElement( - JsonString *pStr, - JsonNode *pNode -){ - int jj, nn; - const char *z; - assert( pNode->eType==JSON_STRING ); - assert( pNode->jnFlags & JNODE_LABEL ); - assert( pNode->eU==1 ); - z = pNode->u.zJContent; - nn = pNode->n; - if( (pNode->jnFlags & JNODE_RAW)==0 ){ - assert( nn>=2 ); - assert( z[0]=='"' || z[0]=='\'' ); - assert( z[nn-1]=='"' || z[0]=='\'' ); - if( nn>2 && sqlite3Isalpha(z[1]) ){ - for(jj=2; jjbRecursive ){ + u8 x; + u8 levelChange = 0; + u32 n, sz = 0; + u32 i = jsonSkipLabel(p); + x = p->sParse.aBlob[i] & 0x0f; + n = jsonbPayloadSize(&p->sParse, i, &sz); + if( x==JSONB_OBJECT || x==JSONB_ARRAY ){ + JsonParent *pParent; + if( p->nParent>=p->nParentAlloc ){ + JsonParent *pNew; + u64 nNew; + nNew = p->nParentAlloc*2 + 3; + pNew = sqlite3DbRealloc(p->db, p->aParent, sizeof(JsonParent)*nNew); + if( pNew==0 ) return SQLITE_NOMEM; + p->nParentAlloc = (u32)nNew; + p->aParent = pNew; + } + levelChange = 1; + pParent = &p->aParent[p->nParent]; + pParent->iHead = p->i; + pParent->iValue = i; + pParent->iEnd = i + n + sz; + pParent->iKey = -1; + pParent->nPath = (u32)p->path.nUsed; + if( p->eType && p->nParent ){ + jsonAppendPathName(p); + if( p->path.eErr ) rc = SQLITE_NOMEM; + } + p->nParent++; + p->i = i + n; + }else{ + p->i = i + n + sz; + } + while( p->nParent>0 && p->i >= p->aParent[p->nParent-1].iEnd ){ + p->nParent--; + p->path.nUsed = p->aParent[p->nParent].nPath; + levelChange = 1; + } + if( levelChange ){ + if( p->nParent>0 ){ + JsonParent *pParent = &p->aParent[p->nParent-1]; + u32 iVal = pParent->iValue; + p->eType = p->sParse.aBlob[iVal] & 0x0f; + }else{ + p->eType = 0; } } + }else{ + u32 n, sz = 0; + u32 i = jsonSkipLabel(p); + n = jsonbPayloadSize(&p->sParse, i, &sz); + p->i = i + n + sz; + } + if( p->eType==JSONB_ARRAY && p->nParent ){ + p->aParent[p->nParent-1].iKey++; } - jsonPrintf(nn+2, pStr, ".%.*s", nn, z); + p->iRowid++; + return rc; } -/* Append the name of the path for element i to pStr +/* Length of the path for rowid==0 in bRecursive mode. 
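The parent stack and jsonAppendPathName() above are what generate the fullkey and path columns of json_each()/json_tree(), including the quoting of labels that are not simple identifiers. A hypothetical harness showing the shapes produced, again assuming a build of this amalgamation:

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* Labels that are not plain identifiers come back quoted: expect
  ** $, $."a b", $."a b"[0], $."a b"[1], $."a b"[1].c  */
  sqlite3_prepare_v2(db,
    "SELECT fullkey, type FROM json_tree('{\"a b\":[1,{\"c\":2}]}')",
    -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%-16s %s\n", (const char*)sqlite3_column_text(pStmt,0),
                         (const char*)sqlite3_column_text(pStmt,1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
```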
*/ -static void jsonEachComputePath( - JsonEachCursor *p, /* The cursor */ - JsonString *pStr, /* Write the path here */ - u32 i /* Path to this element */ -){ - JsonNode *pNode, *pUp; - u32 iUp; - if( i==0 ){ - jsonAppendChar(pStr, '$'); - return; - } - iUp = p->sParse.aUp[i]; - jsonEachComputePath(p, pStr, iUp); - pNode = &p->sParse.aNode[i]; - pUp = &p->sParse.aNode[iUp]; - if( pUp->eType==JSON_ARRAY ){ - assert( pUp->eU==3 || (pUp->eU==0 && pUp->u.iKey==0) ); - testcase( pUp->eU==0 ); - jsonPrintf(30, pStr, "[%d]", pUp->u.iKey); - }else{ - assert( pUp->eType==JSON_OBJECT ); - if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--; - jsonAppendObjectPathElement(pStr, pNode); +static int jsonEachPathLength(JsonEachCursor *p){ + u32 n = p->path.nUsed; + char *z = p->path.zBuf; + if( p->iRowid==0 && p->bRecursive && n>=2 ){ + while( n>1 ){ + n--; + if( z[n]=='[' || z[n]=='.' ){ + u32 x, sz = 0; + char cSaved = z[n]; + z[n] = 0; + assert( p->sParse.eEdit==0 ); + x = jsonLookupStep(&p->sParse, 0, z+1, 0); + z[n] = cSaved; + if( JSON_LOOKUP_ISERROR(x) ) continue; + if( x + jsonbPayloadSize(&p->sParse, x, &sz) == p->i ) break; + } + } } + return n; } /* Return the value of a column */ static int jsonEachColumn( sqlite3_vtab_cursor *cur, /* The cursor */ sqlite3_context *ctx, /* First argument to sqlite3_result_...() */ - int i /* Which column to return */ + int iColumn /* Which column to return */ ){ JsonEachCursor *p = (JsonEachCursor*)cur; - JsonNode *pThis = &p->sParse.aNode[p->i]; - switch( i ){ + switch( iColumn ){ case JEACH_KEY: { - if( p->i==0 ) break; - if( p->eType==JSON_OBJECT ){ - jsonReturn(&p->sParse, pThis, ctx); - }else if( p->eType==JSON_ARRAY ){ - u32 iKey; - if( p->bRecursive ){ - if( p->iRowid==0 ) break; - assert( p->sParse.aNode[p->sParse.aUp[p->i]].eU==3 ); - iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey; + if( p->nParent==0 ){ + u32 n, j; + if( p->nRoot==1 ) break; + j = jsonEachPathLength(p); + n = p->nRoot - j; + if( n==0 ){ + break; + }else if( p->path.zBuf[j]=='[' ){ + i64 x; + sqlite3Atoi64(&p->path.zBuf[j+1], &x, n-1, SQLITE_UTF8); + sqlite3_result_int64(ctx, x); + }else if( p->path.zBuf[j+1]=='"' ){ + sqlite3_result_text(ctx, &p->path.zBuf[j+2], n-3, SQLITE_TRANSIENT); }else{ - iKey = p->iRowid; + sqlite3_result_text(ctx, &p->path.zBuf[j+1], n-1, SQLITE_TRANSIENT); } - sqlite3_result_int64(ctx, (sqlite3_int64)iKey); + break; + } + if( p->eType==JSONB_OBJECT ){ + jsonReturnFromBlob(&p->sParse, p->i, ctx, 1); + }else{ + assert( p->eType==JSONB_ARRAY ); + sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iKey); } break; } case JEACH_VALUE: { - if( pThis->jnFlags & JNODE_LABEL ) pThis++; - jsonReturn(&p->sParse, pThis, ctx); + u32 i = jsonSkipLabel(p); + jsonReturnFromBlob(&p->sParse, i, ctx, 1); break; } case JEACH_TYPE: { - if( pThis->jnFlags & JNODE_LABEL ) pThis++; - sqlite3_result_text(ctx, jsonType[pThis->eType], -1, SQLITE_STATIC); + u32 i = jsonSkipLabel(p); + u8 eType = p->sParse.aBlob[i] & 0x0f; + sqlite3_result_text(ctx, jsonbType[eType], -1, SQLITE_STATIC); break; } case JEACH_ATOM: { - if( pThis->jnFlags & JNODE_LABEL ) pThis++; - if( pThis->eType>=JSON_ARRAY ) break; - jsonReturn(&p->sParse, pThis, ctx); + u32 i = jsonSkipLabel(p); + if( (p->sParse.aBlob[i] & 0x0f)sParse, i, ctx, 1); + } break; } case JEACH_ID: { - sqlite3_result_int64(ctx, - (sqlite3_int64)p->i + ((pThis->jnFlags & JNODE_LABEL)!=0)); + sqlite3_result_int64(ctx, (sqlite3_int64)p->i); break; } case JEACH_PARENT: { - if( p->i>p->iBegin && p->bRecursive ){ - sqlite3_result_int64(ctx, 
(sqlite3_int64)p->sParse.aUp[p->i]); + if( p->nParent>0 && p->bRecursive ){ + sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iHead); } break; } case JEACH_FULLKEY: { - JsonString x; - jsonInit(&x, ctx); - if( p->bRecursive ){ - jsonEachComputePath(p, &x, p->i); - }else{ - if( p->zRoot ){ - jsonAppendRaw(&x, p->zRoot, (int)strlen(p->zRoot)); - }else{ - jsonAppendChar(&x, '$'); - } - if( p->eType==JSON_ARRAY ){ - jsonPrintf(30, &x, "[%d]", p->iRowid); - }else if( p->eType==JSON_OBJECT ){ - jsonAppendObjectPathElement(&x, pThis); - } - } - jsonResult(&x); + u64 nBase = p->path.nUsed; + if( p->nParent ) jsonAppendPathName(p); + sqlite3_result_text64(ctx, p->path.zBuf, p->path.nUsed, + SQLITE_TRANSIENT, SQLITE_UTF8); + p->path.nUsed = nBase; break; } case JEACH_PATH: { - if( p->bRecursive ){ - JsonString x; - jsonInit(&x, ctx); - jsonEachComputePath(p, &x, p->sParse.aUp[p->i]); - jsonResult(&x); - break; - } - /* For json_each() path and root are the same so fall through - ** into the root case */ - /* no break */ deliberate_fall_through + u32 n = jsonEachPathLength(p); + sqlite3_result_text64(ctx, p->path.zBuf, n, + SQLITE_TRANSIENT, SQLITE_UTF8); + break; } default: { - const char *zRoot = p->zRoot; - if( zRoot==0 ) zRoot = "$"; - sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC); + sqlite3_result_text(ctx, p->path.zBuf, p->nRoot, SQLITE_STATIC); break; } case JEACH_JSON: { - assert( i==JEACH_JSON ); - sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC); + if( p->sParse.zJson==0 ){ + sqlite3_result_blob(ctx, p->sParse.aBlob, p->sParse.nBlob, + SQLITE_STATIC); + }else{ + sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC); + } break; } } @@ -206220,86 +208227,97 @@ static int jsonEachFilter( int argc, sqlite3_value **argv ){ JsonEachCursor *p = (JsonEachCursor*)cur; - const char *z; const char *zRoot = 0; - sqlite3_int64 n; + u32 i, n, sz; UNUSED_PARAMETER(idxStr); UNUSED_PARAMETER(argc); jsonEachCursorReset(p); if( idxNum==0 ) return SQLITE_OK; - z = (const char*)sqlite3_value_text(argv[0]); - if( z==0 ) return SQLITE_OK; memset(&p->sParse, 0, sizeof(p->sParse)); p->sParse.nJPRef = 1; - if( sqlite3ValueIsOfClass(argv[0], sqlite3RCStrUnref) ){ - p->sParse.zJson = sqlite3RCStrRef((char*)z); - }else{ - n = sqlite3_value_bytes(argv[0]); - p->sParse.zJson = sqlite3RCStrNew( n+1 ); - if( p->sParse.zJson==0 ) return SQLITE_NOMEM; - memcpy(p->sParse.zJson, z, (size_t)n+1); - } - p->sParse.bJsonIsRCStr = 1; - p->zJson = p->sParse.zJson; - if( jsonParse(&p->sParse, 0) ){ - int rc = SQLITE_NOMEM; - if( p->sParse.oom==0 ){ - sqlite3_free(cur->pVtab->zErrMsg); - cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON"); - if( cur->pVtab->zErrMsg ) rc = SQLITE_ERROR; + p->sParse.db = p->db; + if( jsonFuncArgMightBeBinary(argv[0]) ){ + p->sParse.nBlob = sqlite3_value_bytes(argv[0]); + p->sParse.aBlob = (u8*)sqlite3_value_blob(argv[0]); + }else{ + p->sParse.zJson = (char*)sqlite3_value_text(argv[0]); + p->sParse.nJson = sqlite3_value_bytes(argv[0]); + if( p->sParse.zJson==0 ){ + p->i = p->iEnd = 0; + return SQLITE_OK; } - jsonEachCursorReset(p); - return rc; - }else if( p->bRecursive && jsonParseFindParents(&p->sParse) ){ - jsonEachCursorReset(p); - return SQLITE_NOMEM; - }else{ - JsonNode *pNode = 0; - if( idxNum==3 ){ - const char *zErr = 0; - zRoot = (const char*)sqlite3_value_text(argv[1]); - if( zRoot==0 ) return SQLITE_OK; - n = sqlite3_value_bytes(argv[1]); - p->zRoot = sqlite3_malloc64( n+1 ); - if( p->zRoot==0 ) return SQLITE_NOMEM; - memcpy(p->zRoot, zRoot, (size_t)n+1); - if( 
zRoot[0]!='$' ){ - zErr = zRoot; - }else{ - pNode = jsonLookupStep(&p->sParse, 0, p->zRoot+1, 0, &zErr); + if( jsonConvertTextToBlob(&p->sParse, 0) ){ + if( p->sParse.oom ){ + return SQLITE_NOMEM; } - if( zErr ){ + goto json_each_malformed_input; + } + } + if( idxNum==3 ){ + zRoot = (const char*)sqlite3_value_text(argv[1]); + if( zRoot==0 ) return SQLITE_OK; + if( zRoot[0]!='$' ){ + sqlite3_free(cur->pVtab->zErrMsg); + cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot); + jsonEachCursorReset(p); + return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM; + } + p->nRoot = sqlite3Strlen30(zRoot); + if( zRoot[1]==0 ){ + i = p->i = 0; + p->eType = 0; + }else{ + i = jsonLookupStep(&p->sParse, 0, zRoot+1, 0); + if( JSON_LOOKUP_ISERROR(i) ){ + if( i==JSON_LOOKUP_NOTFOUND ){ + p->i = 0; + p->eType = 0; + p->iEnd = 0; + return SQLITE_OK; + } sqlite3_free(cur->pVtab->zErrMsg); - cur->pVtab->zErrMsg = jsonPathSyntaxError(zErr); + cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot); jsonEachCursorReset(p); return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM; - }else if( pNode==0 ){ - return SQLITE_OK; } - }else{ - pNode = p->sParse.aNode; - } - p->iBegin = p->i = (int)(pNode - p->sParse.aNode); - p->eType = pNode->eType; - if( p->eType>=JSON_ARRAY ){ - assert( pNode->eU==0 ); - VVA( pNode->eU = 3 ); - pNode->u.iKey = 0; - p->iEnd = p->i + pNode->n + 1; - if( p->bRecursive ){ - p->eType = p->sParse.aNode[p->sParse.aUp[p->i]].eType; - if( p->i>0 && (p->sParse.aNode[p->i-1].jnFlags & JNODE_LABEL)!=0 ){ - p->i--; - } + if( p->sParse.iLabel ){ + p->i = p->sParse.iLabel; + p->eType = JSONB_OBJECT; }else{ - p->i++; - } - }else{ - p->iEnd = p->i+1; - } + p->i = i; + p->eType = JSONB_ARRAY; + } + } + jsonAppendRaw(&p->path, zRoot, p->nRoot); + }else{ + i = p->i = 0; + p->eType = 0; + p->nRoot = 1; + jsonAppendRaw(&p->path, "$", 1); + } + p->nParent = 0; + n = jsonbPayloadSize(&p->sParse, i, &sz); + p->iEnd = i+n+sz; + if( (p->sParse.aBlob[i] & 0x0f)>=JSONB_ARRAY && !p->bRecursive ){ + p->i = i + n; + p->eType = p->sParse.aBlob[i] & 0x0f; + p->aParent = sqlite3DbMallocZero(p->db, sizeof(JsonParent)); + if( p->aParent==0 ) return SQLITE_NOMEM; + p->nParent = 1; + p->nParentAlloc = 1; + p->aParent[0].iKey = 0; + p->aParent[0].iEnd = p->iEnd; + p->aParent[0].iHead = p->i; + p->aParent[0].iValue = i; } return SQLITE_OK; + +json_each_malformed_input: + sqlite3_free(cur->pVtab->zErrMsg); + cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON"); + jsonEachCursorReset(p); + return cur->pVtab->zErrMsg ? 
SQLITE_ERROR : SQLITE_NOMEM; } /* The methods of the json_each virtual table */ @@ -206368,34 +208386,57 @@ static sqlite3_module jsonTreeModule = { SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){ #ifndef SQLITE_OMIT_JSON static FuncDef aJsonFunc[] = { - JFUNCTION(json, 1, 0, jsonRemoveFunc), - JFUNCTION(json_array, -1, 0, jsonArrayFunc), - JFUNCTION(json_array_length, 1, 0, jsonArrayLengthFunc), - JFUNCTION(json_array_length, 2, 0, jsonArrayLengthFunc), - JFUNCTION(json_error_position,1, 0, jsonErrorFunc), - JFUNCTION(json_extract, -1, 0, jsonExtractFunc), - JFUNCTION(->, 2, JSON_JSON, jsonExtractFunc), - JFUNCTION(->>, 2, JSON_SQL, jsonExtractFunc), - JFUNCTION(json_insert, -1, 0, jsonSetFunc), - JFUNCTION(json_object, -1, 0, jsonObjectFunc), - JFUNCTION(json_patch, 2, 0, jsonPatchFunc), - JFUNCTION(json_quote, 1, 0, jsonQuoteFunc), - JFUNCTION(json_remove, -1, 0, jsonRemoveFunc), - JFUNCTION(json_replace, -1, 0, jsonReplaceFunc), - JFUNCTION(json_set, -1, JSON_ISSET, jsonSetFunc), - JFUNCTION(json_type, 1, 0, jsonTypeFunc), - JFUNCTION(json_type, 2, 0, jsonTypeFunc), - JFUNCTION(json_valid, 1, 0, jsonValidFunc), + /* sqlite3_result_subtype() ----, ,--- sqlite3_value_subtype() */ + /* | | */ + /* Uses cache ------, | | ,---- Returns JSONB */ + /* | | | | */ + /* Number of arguments ---, | | | | ,--- Flags */ + /* | | | | | | */ + JFUNCTION(json, 1,1,1, 0,0,0, jsonRemoveFunc), + JFUNCTION(jsonb, 1,1,0, 0,1,0, jsonRemoveFunc), + JFUNCTION(json_array, -1,0,1, 1,0,0, jsonArrayFunc), + JFUNCTION(jsonb_array, -1,0,1, 1,1,0, jsonArrayFunc), + JFUNCTION(json_array_length, 1,1,0, 0,0,0, jsonArrayLengthFunc), + JFUNCTION(json_array_length, 2,1,0, 0,0,0, jsonArrayLengthFunc), + JFUNCTION(json_error_position,1,1,0, 0,0,0, jsonErrorFunc), + JFUNCTION(json_extract, -1,1,1, 0,0,0, jsonExtractFunc), + JFUNCTION(jsonb_extract, -1,1,0, 0,1,0, jsonExtractFunc), + JFUNCTION(->, 2,1,1, 0,0,JSON_JSON, jsonExtractFunc), + JFUNCTION(->>, 2,1,0, 0,0,JSON_SQL, jsonExtractFunc), + JFUNCTION(json_insert, -1,1,1, 1,0,0, jsonSetFunc), + JFUNCTION(jsonb_insert, -1,1,0, 1,1,0, jsonSetFunc), + JFUNCTION(json_object, -1,0,1, 1,0,0, jsonObjectFunc), + JFUNCTION(jsonb_object, -1,0,1, 1,1,0, jsonObjectFunc), + JFUNCTION(json_patch, 2,1,1, 0,0,0, jsonPatchFunc), + JFUNCTION(jsonb_patch, 2,1,0, 0,1,0, jsonPatchFunc), + JFUNCTION(json_quote, 1,0,1, 1,0,0, jsonQuoteFunc), + JFUNCTION(json_remove, -1,1,1, 0,0,0, jsonRemoveFunc), + JFUNCTION(jsonb_remove, -1,1,0, 0,1,0, jsonRemoveFunc), + JFUNCTION(json_replace, -1,1,1, 1,0,0, jsonReplaceFunc), + JFUNCTION(jsonb_replace, -1,1,0, 1,1,0, jsonReplaceFunc), + JFUNCTION(json_set, -1,1,1, 1,0,JSON_ISSET, jsonSetFunc), + JFUNCTION(jsonb_set, -1,1,0, 1,1,JSON_ISSET, jsonSetFunc), + JFUNCTION(json_type, 1,1,0, 0,0,0, jsonTypeFunc), + JFUNCTION(json_type, 2,1,0, 0,0,0, jsonTypeFunc), + JFUNCTION(json_valid, 1,1,0, 0,0,0, jsonValidFunc), + JFUNCTION(json_valid, 2,1,0, 0,0,0, jsonValidFunc), #if SQLITE_DEBUG - JFUNCTION(json_parse, 1, 0, jsonParseFunc), - JFUNCTION(json_test1, 1, 0, jsonTest1Func), + JFUNCTION(json_parse, 1,1,0, 0,0,0, jsonParseFunc), #endif WAGGREGATE(json_group_array, 1, 0, 0, jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse, - SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC), + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8| + SQLITE_DETERMINISTIC), + WAGGREGATE(jsonb_group_array, 1, JSON_BLOB, 0, + jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse, + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC), 
WAGGREGATE(json_group_object, 2, 0, 0, jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse, - SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC) + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC), + WAGGREGATE(jsonb_group_object,2, JSON_BLOB, 0, + jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse, + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8| + SQLITE_DETERMINISTIC) }; sqlite3InsertBuiltinFuncs(aJsonFunc, ArraySize(aJsonFunc)); #endif @@ -207143,7 +209184,7 @@ static int nodeAcquire( ** increase its reference count and return it. */ if( (pNode = nodeHashLookup(pRtree, iNode))!=0 ){ - if( pParent && pParent!=pNode->pParent ){ + if( pParent && ALWAYS(pParent!=pNode->pParent) ){ RTREE_IS_CORRUPT(pRtree); return SQLITE_CORRUPT_VTAB; } @@ -209878,7 +211919,7 @@ static int rtreeSqlInit( } sqlite3_free(zSql); } - if( pRtree->nAux ){ + if( pRtree->nAux && rc!=SQLITE_NOMEM ){ pRtree->zReadAuxSql = sqlite3_mprintf( "SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1", zDb, zPrefix); @@ -210567,15 +212608,13 @@ static int rtreeCheckTable( check.zTab = zTab; /* Find the number of auxiliary columns */ - if( check.rc==SQLITE_OK ){ - pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab); - if( pStmt ){ - nAux = sqlite3_column_count(pStmt) - 2; - sqlite3_finalize(pStmt); - }else - if( check.rc!=SQLITE_NOMEM ){ - check.rc = SQLITE_OK; - } + pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab); + if( pStmt ){ + nAux = sqlite3_column_count(pStmt) - 2; + sqlite3_finalize(pStmt); + }else + if( check.rc!=SQLITE_NOMEM ){ + check.rc = SQLITE_OK; } /* Find number of dimensions in the rtree table. */ @@ -210630,6 +212669,7 @@ static int rtreeIntegrity( if( rc==SQLITE_OK && *pzErr ){ *pzErr = sqlite3_mprintf("In RTree %s.%s:\n%z", pRtree->zDb, pRtree->zName, *pzErr); + if( (*pzErr)==0 ) rc = SQLITE_NOMEM; } return rc; } @@ -223300,9 +225340,7 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession){ ** associated hash-tables. */ sessionDeleteTable(pSession, pSession->pTable); - /* Assert that all allocations have been freed and then free the - ** session object itself. */ - // assert( pSession->nMalloc==0 ); + /* Free the session object. */ sqlite3_free(pSession); } @@ -227515,8 +229553,11 @@ struct Fts5PhraseIter { ** created with the "columnsize=0" option. ** ** xColumnText: -** This function attempts to retrieve the text of column iCol of the -** current document. If successful, (*pz) is set to point to a buffer +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer ** containing the text in utf-8 encoding, (*pn) is set to the size in bytes ** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, ** if an error occurs, an SQLite error code is returned and the final values @@ -227526,8 +229567,10 @@ struct Fts5PhraseIter { ** Returns the number of phrases in the current query expression. ** ** xPhraseSize: -** Returns the number of tokens in phrase iPhrase of the query. Phrases -** are numbered starting from zero. +** If parameter iCol is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. 
Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. ** ** xInstCount: ** Set *pnInst to the total number of occurrences of all phrases within @@ -227543,12 +229586,13 @@ struct Fts5PhraseIter { ** Query for the details of phrase match iIdx within the current row. ** Phrase matches are numbered starting from zero, so the iIdx argument ** should be greater than or equal to zero and smaller than the value -** output by xInstCount(). +** output by xInstCount(). If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. ** -** Usually, output parameter *piPhrase is set to the phrase number, *piCol +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol ** to the column in which it occurs and *piOff the token offset of the -** first token of the phrase. Returns SQLITE_OK if successful, or an error -** code (i.e. SQLITE_NOMEM) if an error occurs. +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. ** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. @@ -227574,6 +229618,10 @@ struct Fts5PhraseIter { ** Invoking Api.xUserData() returns a copy of the pointer passed as ** the third argument to pUserData. ** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. ** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK. @@ -227688,9 +229736,42 @@ struct Fts5PhraseIter { ** ** xPhraseNextColumn() ** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. +** +** If iPhrase or iToken are less than zero, or if iPhrase is greater than +** or equal to the number of phrases in the query as reported by +** xPhraseCount(), or if iToken is equal to or greater than the number of +** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken + are both zeroed. +** +** The output text is not a copy of the query text that specified the +** token. It is the output of the tokenizer module. For tokendata=1 +** tables, this includes any embedded 0x00 and trailing data. +** +** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase hit iIdx within the +** current row. If iIdx is less than zero or greater than or equal to the +** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, +** output variable (*ppToken) is set to point to a buffer containing the +** matching document token, and (*pnToken) to the size of that buffer in +** bytes. This API is not available if the specified token matches a +** prefix query term. In that case both output variables are always set +** to 0. +** +** The output text is not a copy of the document text that was tokenized. +** It is the output of the tokenizer module. For tokendata=1 tables, this +** includes any embedded 0x00 and trailing data. 
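For auxiliary-function authors, the tightened SQLITE_RANGE contracts above mainly mean checking return codes instead of assuming indexes are in range. The skeleton below is a hypothetical auxiliary function, not part of this diff, that walks phrase hits defensively; registration through the "fts5" pointer-passing API is omitted for brevity, and fts5.h is the header distributed alongside the amalgamation.

```c
#include "sqlite3.h"
#include "fts5.h"   /* defines Fts5ExtensionApi / Fts5Context */

/* Hypothetical FTS5 auxiliary function: count phrase hits in the current
** row, treating any SQLITE_RANGE from xInst() as end-of-iteration per the
** contract documented above. */
static void hitcountFunc(
  const Fts5ExtensionApi *pApi,    /* Extension API object */
  Fts5Context *pFts,               /* First argument for pApi methods */
  sqlite3_context *pCtx,           /* SQL function result context */
  int nVal, sqlite3_value **apVal  /* SQL arguments (unused here) */
){
  int nInst = 0, i, rc;
  (void)nVal; (void)apVal;
  rc = pApi->xInstCount(pFts, &nInst);
  for(i=0; rc==SQLITE_OK && i<nInst; i++){
    int iPhrase, iCol, iOff;
    rc = pApi->xInst(pFts, i, &iPhrase, &iCol, &iOff);
    if( rc==SQLITE_RANGE ){ rc = SQLITE_OK; break; }
  }
  if( rc==SQLITE_OK ){
    sqlite3_result_int(pCtx, nInst);
  }else{
    sqlite3_result_error_code(pCtx, rc);
  }
}
```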
+** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ void *(*xUserData)(Fts5Context*); @@ -227725,6 +229806,13 @@ struct Fts5ExtensionApi { int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); + + /* Below this point are iVersion>=3 only */ + int (*xQueryToken)(Fts5Context*, + int iPhrase, int iToken, + const char **ppToken, int *pnToken + ); + int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); }; /* @@ -228199,6 +230287,7 @@ struct Fts5Config { char *zContent; /* content table */ char *zContentRowid; /* "content_rowid=" option value */ int bColumnsize; /* "columnsize=" option value (dflt==1) */ + int bTokendata; /* "tokendata=" option value (dflt==0) */ int eDetail; /* FTS5_DETAIL_XXX value */ char *zContentExprlist; Fts5Tokenizer *pTok; @@ -228387,17 +230476,19 @@ struct Fts5IndexIter { /* ** Values used as part of the flags argument passed to IndexQuery(). */ -#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */ -#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */ -#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */ -#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */ +#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */ +#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */ +#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */ +#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */ /* The following are used internally by the fts5_index.c module. They are ** defined here only to make it easier to avoid clashes with the flags ** above. */ -#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010 -#define FTS5INDEX_QUERY_NOOUTPUT 0x0020 -#define FTS5INDEX_QUERY_SKIPHASH 0x0040 +#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010 +#define FTS5INDEX_QUERY_NOOUTPUT 0x0020 +#define FTS5INDEX_QUERY_SKIPHASH 0x0040 +#define FTS5INDEX_QUERY_NOTOKENDATA 0x0080 +#define FTS5INDEX_QUERY_SCANONETERM 0x0100 /* ** Create/destroy an Fts5Index object. @@ -228466,6 +230557,10 @@ static void *sqlite3Fts5StructureRef(Fts5Index*); static void sqlite3Fts5StructureRelease(void*); static int sqlite3Fts5StructureTest(Fts5Index*, void*); +/* +** Used by xInstToken(): +*/ +static int sqlite3Fts5IterToken(Fts5IndexIter*, i64, int, int, const char**, int*); /* ** Insert or remove data to or from the index. Each time a document is @@ -228543,6 +230638,13 @@ static int sqlite3Fts5IndexLoadConfig(Fts5Index *p); static int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin); static int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid); +static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter*); + +/* Used to populate hash tables for xInstToken in detail=none/column mode. */ +static int sqlite3Fts5IndexIterWriteTokendata( + Fts5IndexIter*, const char*, int, i64 iRowid, int iCol, int iOff +); + /* ** End of interface to code in fts5_index.c. 
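The iVersion bump to 3 and the xQueryToken()/xInstToken() entries above exist to surface tokendata=1 tokens, whose option parsing appears further down in fts5ConfigParseSpecial(). Enabling the option is a single table directive; a hypothetical sketch follows (table and column names are illustrative):

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* tokendata=1: the index stores each token verbatim, including any
  ** embedded 0x00 and trailing payload the tokenizer emits; only the
  ** bytes before the first 0x00 participate in matching. */
  if( sqlite3_exec(db,
        "CREATE VIRTUAL TABLE doc USING fts5(body, tokendata=1);"
        "INSERT INTO doc VALUES('hello world');",
        0, 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "error: %s\n", zErr);
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}
```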
**************************************************************************/ @@ -228648,6 +230750,7 @@ static void sqlite3Fts5HashScanNext(Fts5Hash*); static int sqlite3Fts5HashScanEof(Fts5Hash*); static void sqlite3Fts5HashScanEntry(Fts5Hash *, const char **pzTerm, /* OUT: term (nul-terminated) */ + int *pnTerm, /* OUT: Size of term in bytes */ const u8 **ppDoclist, /* OUT: pointer to doclist */ int *pnDoclist /* OUT: size of doclist in bytes */ ); @@ -228774,6 +230877,10 @@ static int sqlite3Fts5ExprClonePhrase(Fts5Expr*, int, Fts5Expr**); static int sqlite3Fts5ExprPhraseCollist(Fts5Expr *, int, const u8 **, int *); +static int sqlite3Fts5ExprQueryToken(Fts5Expr*, int, int, const char**, int*); +static int sqlite3Fts5ExprInstToken(Fts5Expr*, i64, int, int, int, int, const char**, int*); +static void sqlite3Fts5ExprClearTokens(Fts5Expr*); + /******************************************* ** The fts5_expr.c API above this point is used by the other hand-written ** C code in this module. The interfaces below this point are called by @@ -230589,6 +232696,14 @@ static int fts5HighlightCb( } if( iPos==p->iRangeEnd ){ + if( p->bOpen ){ + if( p->iter.iStart>=0 && iPos>=p->iter.iStart ){ + fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); + p->iOff = iEndOff; + } + fts5HighlightAppend(&rc, p, p->zClose, -1); + p->bOpen = 0; + } fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); p->iOff = iEndOff; } @@ -230622,8 +232737,10 @@ static void fts5HighlightFunction( ctx.zClose = (const char*)sqlite3_value_text(apVal[2]); ctx.iRangeEnd = -1; rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn); - - if( ctx.zIn ){ + if( rc==SQLITE_RANGE ){ + sqlite3_result_text(pCtx, "", -1, SQLITE_STATIC); + rc = SQLITE_OK; + }else if( ctx.zIn ){ if( rc==SQLITE_OK ){ rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter); } @@ -231190,6 +233307,7 @@ static void sqlite3Fts5BufferAppendBlob( ){ if( nData ){ if( fts5BufferGrow(pRc, pBuf, nData) ) return; + assert( pBuf->p!=0 ); memcpy(&pBuf->p[pBuf->n], pData, nData); pBuf->n += nData; } @@ -231291,6 +233409,7 @@ static int sqlite3Fts5PoslistNext64( i64 *piOff /* IN/OUT: Current offset */ ){ int i = *pi; + assert( a!=0 || i==0 ); if( i>=n ){ /* EOF */ *piOff = -1; @@ -231298,6 +233417,7 @@ static int sqlite3Fts5PoslistNext64( }else{ i64 iOff = *piOff; u32 iVal; + assert( a!=0 ); fts5FastGetVarint32(a, i, iVal); if( iVal<=1 ){ if( iVal==0 ){ @@ -231929,6 +234049,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("tokendata", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed tokendata=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bTokendata = (zArg[0]=='1'); + } + return rc; + } + *pzErr = sqlite3_mprintf("unrecognized option: \"%.*s\"", nCmd, zCmd); return SQLITE_ERROR; } @@ -232662,7 +234792,9 @@ struct Fts5ExprNode { struct Fts5ExprTerm { u8 bPrefix; /* True for a prefix term */ u8 bFirst; /* True if token must be first in column */ - char *zTerm; /* nul-terminated term */ + char *pTerm; /* Term data */ + int nQueryTerm; /* Effective size of term in bytes */ + int nFullTerm; /* Size of term in bytes incl. tokendata */ Fts5IndexIter *pIter; /* Iterator for this term */ Fts5ExprTerm *pSynonym; /* Pointer to first in list of synonyms */ }; @@ -233529,7 +235661,7 @@ static int fts5ExprNearInitAll( p->pIter = 0; } rc = sqlite3Fts5IndexQuery( - pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm), + pExpr->pIndex, p->pTerm, p->nQueryTerm, (pTerm->bPrefix ? 
FTS5INDEX_QUERY_PREFIX : 0) | (pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0), pNear->pColset, @@ -234166,7 +236298,7 @@ static void fts5ExprPhraseFree(Fts5ExprPhrase *pPhrase){ Fts5ExprTerm *pSyn; Fts5ExprTerm *pNext; Fts5ExprTerm *pTerm = &pPhrase->aTerm[i]; - sqlite3_free(pTerm->zTerm); + sqlite3_free(pTerm->pTerm); sqlite3Fts5IterClose(pTerm->pIter); for(pSyn=pTerm->pSynonym; pSyn; pSyn=pNext){ pNext = pSyn->pSynonym; @@ -234264,6 +236396,7 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( typedef struct TokenCtx TokenCtx; struct TokenCtx { Fts5ExprPhrase *pPhrase; + Fts5Config *pConfig; int rc; }; @@ -234297,8 +236430,12 @@ static int fts5ParseTokenize( rc = SQLITE_NOMEM; }else{ memset(pSyn, 0, (size_t)nByte); - pSyn->zTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer); - memcpy(pSyn->zTerm, pToken, nToken); + pSyn->pTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer); + pSyn->nFullTerm = pSyn->nQueryTerm = nToken; + if( pCtx->pConfig->bTokendata ){ + pSyn->nQueryTerm = (int)strlen(pSyn->pTerm); + } + memcpy(pSyn->pTerm, pToken, nToken); pSyn->pSynonym = pPhrase->aTerm[pPhrase->nTerm-1].pSynonym; pPhrase->aTerm[pPhrase->nTerm-1].pSynonym = pSyn; } @@ -234323,7 +236460,11 @@ static int fts5ParseTokenize( if( rc==SQLITE_OK ){ pTerm = &pPhrase->aTerm[pPhrase->nTerm++]; memset(pTerm, 0, sizeof(Fts5ExprTerm)); - pTerm->zTerm = sqlite3Fts5Strndup(&rc, pToken, nToken); + pTerm->pTerm = sqlite3Fts5Strndup(&rc, pToken, nToken); + pTerm->nFullTerm = pTerm->nQueryTerm = nToken; + if( pCtx->pConfig->bTokendata && rc==SQLITE_OK ){ + pTerm->nQueryTerm = (int)strlen(pTerm->pTerm); + } } } @@ -234390,6 +236531,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( memset(&sCtx, 0, sizeof(TokenCtx)); sCtx.pPhrase = pAppend; + sCtx.pConfig = pConfig; rc = fts5ParseStringFromToken(pToken, &z); if( rc==SQLITE_OK ){ @@ -234437,12 +236579,15 @@ static int sqlite3Fts5ExprClonePhrase( Fts5Expr **ppNew ){ int rc = SQLITE_OK; /* Return code */ - Fts5ExprPhrase *pOrig; /* The phrase extracted from pExpr */ + Fts5ExprPhrase *pOrig = 0; /* The phrase extracted from pExpr */ Fts5Expr *pNew = 0; /* Expression to return via *ppNew */ - TokenCtx sCtx = {0,0}; /* Context object for fts5ParseTokenize */ - - pOrig = pExpr->apExprPhrase[iPhrase]; - pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr)); + TokenCtx sCtx = {0,0,0}; /* Context object for fts5ParseTokenize */ + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + rc = SQLITE_RANGE; + }else{ + pOrig = pExpr->apExprPhrase[iPhrase]; + pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr)); + } if( rc==SQLITE_OK ){ pNew->apExprPhrase = (Fts5ExprPhrase**)sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase*)); @@ -234455,7 +236600,7 @@ static int sqlite3Fts5ExprClonePhrase( pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*)); } - if( rc==SQLITE_OK ){ + if( rc==SQLITE_OK && ALWAYS(pOrig!=0) ){ Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset; if( pColsetOrig ){ sqlite3_int64 nByte; @@ -234469,26 +236614,27 @@ static int sqlite3Fts5ExprClonePhrase( } } - if( pOrig->nTerm ){ - int i; /* Used to iterate through phrase terms */ - for(i=0; rc==SQLITE_OK && inTerm; i++){ - int tflags = 0; - Fts5ExprTerm *p; - for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){ - const char *zTerm = p->zTerm; - rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, (int)strlen(zTerm), - 0, 0); - tflags = FTS5_TOKEN_COLOCATED; - } - if( rc==SQLITE_OK ){ - sCtx.pPhrase->aTerm[i].bPrefix = 
pOrig->aTerm[i].bPrefix; - sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst; + if( rc==SQLITE_OK ){ + if( pOrig->nTerm ){ + int i; /* Used to iterate through phrase terms */ + sCtx.pConfig = pExpr->pConfig; + for(i=0; rc==SQLITE_OK && inTerm; i++){ + int tflags = 0; + Fts5ExprTerm *p; + for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){ + rc = fts5ParseTokenize((void*)&sCtx,tflags,p->pTerm,p->nFullTerm,0,0); + tflags = FTS5_TOKEN_COLOCATED; + } + if( rc==SQLITE_OK ){ + sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix; + sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst; + } } + }else{ + /* This happens when parsing a token or quoted phrase that contains + ** no token characters at all. (e.g ... MATCH '""'). */ + sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); } - }else{ - /* This happens when parsing a token or quoted phrase that contains - ** no token characters at all. (e.g ... MATCH '""'). */ - sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); } if( rc==SQLITE_OK && ALWAYS(sCtx.pPhrase) ){ @@ -234858,11 +237004,13 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( if( parseGrowPhraseArray(pParse) ){ fts5ExprPhraseFree(pPhrase); }else{ + Fts5ExprTerm *p = &pNear->apPhrase[0]->aTerm[ii]; + Fts5ExprTerm *pTo = &pPhrase->aTerm[0]; pParse->apPhrase[pParse->nPhrase++] = pPhrase; pPhrase->nTerm = 1; - pPhrase->aTerm[0].zTerm = sqlite3Fts5Strndup( - &pParse->rc, pNear->apPhrase[0]->aTerm[ii].zTerm, -1 - ); + pTo->pTerm = sqlite3Fts5Strndup(&pParse->rc, p->pTerm, p->nFullTerm); + pTo->nQueryTerm = p->nQueryTerm; + pTo->nFullTerm = p->nFullTerm; pRet->apChild[ii] = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, sqlite3Fts5ParseNearset(pParse, 0, pPhrase) ); @@ -235047,16 +237195,17 @@ static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){ /* Determine the maximum amount of space required. */ for(p=pTerm; p; p=p->pSynonym){ - nByte += (int)strlen(pTerm->zTerm) * 2 + 3 + 2; + nByte += pTerm->nQueryTerm * 2 + 3 + 2; } zQuoted = sqlite3_malloc64(nByte); if( zQuoted ){ int i = 0; for(p=pTerm; p; p=p->pSynonym){ - char *zIn = p->zTerm; + char *zIn = p->pTerm; + char *zEnd = &zIn[p->nQueryTerm]; zQuoted[i++] = '"'; - while( *zIn ){ + while( zInnTerm; iTerm++){ - char *zTerm = pPhrase->aTerm[iTerm].zTerm; - zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" ", zTerm); + Fts5ExprTerm *p = &pPhrase->aTerm[iTerm]; + zRet = fts5PrintfAppend(zRet, "%s%.*s", iTerm==0?"":" ", + p->nQueryTerm, p->pTerm + ); if( pPhrase->aTerm[iTerm].bPrefix ){ zRet = fts5PrintfAppend(zRet, "*"); } @@ -235536,6 +237687,17 @@ static int fts5ExprColsetTest(Fts5Colset *pColset, int iCol){ return 0; } +/* +** pToken is a buffer nToken bytes in size that may or may not contain +** an embedded 0x00 byte. If it does, return the number of bytes in +** the buffer before the 0x00. If it does not, return nToken. 
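The contract stated in the comment above reduces to a strnlen-style scan; a hypothetical restatement for reference (the name queryTermLen is illustrative, not the diff's):

```c
/* Sketch of the behavior documented above: return the number of bytes in
** pToken before the first embedded 0x00, or nToken if there is none. */
static int queryTermLen(const char *pToken, int nToken){
  int ii;
  for(ii=0; ii<nToken; ii++){
    if( pToken[ii]=='\0' ) break;
  }
  return ii;
}
```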
+*/ +static int fts5QueryTerm(const char *pToken, int nToken){ + int ii; + for(ii=0; iipExpr; int i; + int nQuery = nToken; + i64 iRowid = pExpr->pRoot->iRowid; UNUSED_PARAM2(iUnused1, iUnused2); - if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE; + if( nQuery>FTS5_MAX_TOKEN_SIZE ) nQuery = FTS5_MAX_TOKEN_SIZE; + if( pExpr->pConfig->bTokendata ){ + nQuery = fts5QueryTerm(pToken, nQuery); + } if( (tflags & FTS5_TOKEN_COLOCATED)==0 ) p->iOff++; for(i=0; inPhrase; i++){ - Fts5ExprTerm *pTerm; + Fts5ExprTerm *pT; if( p->aPopulator[i].bOk==0 ) continue; - for(pTerm=&pExpr->apExprPhrase[i]->aTerm[0]; pTerm; pTerm=pTerm->pSynonym){ - int nTerm = (int)strlen(pTerm->zTerm); - if( (nTerm==nToken || (nTermbPrefix)) - && memcmp(pTerm->zTerm, pToken, nTerm)==0 + for(pT=&pExpr->apExprPhrase[i]->aTerm[0]; pT; pT=pT->pSynonym){ + if( (pT->nQueryTerm==nQuery || (pT->nQueryTermbPrefix)) + && memcmp(pT->pTerm, pToken, pT->nQueryTerm)==0 ){ int rc = sqlite3Fts5PoslistWriterAppend( &pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff ); + if( rc==SQLITE_OK && pExpr->pConfig->bTokendata && !pT->bPrefix ){ + int iCol = p->iOff>>32; + int iTokOff = p->iOff & 0x7FFFFFFF; + rc = sqlite3Fts5IndexIterWriteTokendata( + pT->pIter, pToken, nToken, iRowid, iCol, iTokOff + ); + } if( rc ) return rc; break; } @@ -235698,6 +237871,83 @@ static int sqlite3Fts5ExprPhraseCollist( return rc; } +/* +** Does the work of the fts5_api.xQueryToken() API method. +*/ +static int sqlite3Fts5ExprQueryToken( + Fts5Expr *pExpr, + int iPhrase, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5ExprPhrase *pPhrase = 0; + + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + return SQLITE_RANGE; + } + pPhrase = pExpr->apExprPhrase[iPhrase]; + if( iToken<0 || iToken>=pPhrase->nTerm ){ + return SQLITE_RANGE; + } + + *ppOut = pPhrase->aTerm[iToken].pTerm; + *pnOut = pPhrase->aTerm[iToken].nFullTerm; + return SQLITE_OK; +} + +/* +** Does the work of the fts5_api.xInstToken() API method. +*/ +static int sqlite3Fts5ExprInstToken( + Fts5Expr *pExpr, + i64 iRowid, + int iPhrase, + int iCol, + int iOff, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5ExprPhrase *pPhrase = 0; + Fts5ExprTerm *pTerm = 0; + int rc = SQLITE_OK; + + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + return SQLITE_RANGE; + } + pPhrase = pExpr->apExprPhrase[iPhrase]; + if( iToken<0 || iToken>=pPhrase->nTerm ){ + return SQLITE_RANGE; + } + pTerm = &pPhrase->aTerm[iToken]; + if( pTerm->bPrefix==0 ){ + if( pExpr->pConfig->bTokendata ){ + rc = sqlite3Fts5IterToken( + pTerm->pIter, iRowid, iCol, iOff+iToken, ppOut, pnOut + ); + }else{ + *ppOut = pTerm->pTerm; + *pnOut = pTerm->nFullTerm; + } + } + return rc; +} + +/* +** Clear the token mappings for all Fts5IndexIter objects mannaged by +** the expression passed as the only argument. +*/ +static void sqlite3Fts5ExprClearTokens(Fts5Expr *pExpr){ + int ii; + for(ii=0; iinPhrase; ii++){ + Fts5ExprTerm *pT; + for(pT=&pExpr->apExprPhrase[ii]->aTerm[0]; pT; pT=pT->pSynonym){ + sqlite3Fts5IndexIterClearTokendata(pT->pIter); + } + } +} + /* ** 2014 August 11 ** @@ -235736,10 +237986,15 @@ struct Fts5Hash { /* ** Each entry in the hash table is represented by an object of the -** following type. Each object, its key (a nul-terminated string) and -** its current data are stored in a single memory allocation. The -** key immediately follows the object in memory. The position list -** data immediately follows the key data in memory. +** following type. 
Each object, its key, and its current data are stored +** in a single memory allocation. The key immediately follows the object +** in memory. The position list data immediately follows the key data +** in memory. +** +** The key is Fts5HashEntry.nKey bytes in size. It consists of a single +** byte identifying the index (either the main term index or a prefix-index), +** followed by the term data. For example: "0token". There is no +** nul-terminator - in this case nKey=6. ** ** The data that follows the key is in a similar, but not identical format ** to the doclist data stored in the database. It is: @@ -235874,8 +238129,7 @@ static int fts5HashResize(Fts5Hash *pHash){ unsigned int iHash; Fts5HashEntry *p = apOld[i]; apOld[i] = p->pHashNext; - iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), - (int)strlen(fts5EntryKey(p))); + iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), p->nKey); p->pHashNext = apNew[iHash]; apNew[iHash] = p; } @@ -235959,7 +238213,7 @@ static int sqlite3Fts5HashWrite( for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){ char *zKey = fts5EntryKey(p); if( zKey[0]==bByte - && p->nKey==nToken + && p->nKey==nToken+1 && memcmp(&zKey[1], pToken, nToken)==0 ){ break; @@ -235989,9 +238243,9 @@ static int sqlite3Fts5HashWrite( zKey[0] = bByte; memcpy(&zKey[1], pToken, nToken); assert( iHash==fts5HashKey(pHash->nSlot, (u8*)zKey, nToken+1) ); - p->nKey = nToken; + p->nKey = nToken+1; zKey[nToken+1] = '\0'; - p->nData = nToken+1 + 1 + sizeof(Fts5HashEntry); + p->nData = nToken+1 + sizeof(Fts5HashEntry); p->pHashNext = pHash->aSlot[iHash]; pHash->aSlot[iHash] = p; pHash->nEntry++; @@ -236108,12 +238362,17 @@ static Fts5HashEntry *fts5HashEntryMerge( *ppOut = p1; p1 = 0; }else{ - int i = 0; char *zKey1 = fts5EntryKey(p1); char *zKey2 = fts5EntryKey(p2); - while( zKey1[i]==zKey2[i] ) i++; + int nMin = MIN(p1->nKey, p2->nKey); - if( ((u8)zKey1[i])>((u8)zKey2[i]) ){ + int cmp = memcmp(zKey1, zKey2, nMin); + if( cmp==0 ){ + cmp = p1->nKey - p2->nKey; + } + assert( cmp!=0 ); + + if( cmp>0 ){ /* p2 is smaller */ *ppOut = p2; ppOut = &p2->pScanNext; @@ -236132,10 +238391,8 @@ static Fts5HashEntry *fts5HashEntryMerge( } /* -** Extract all tokens from hash table iHash and link them into a list -** in sorted order. The hash table is cleared before returning. It is -** the responsibility of the caller to free the elements of the returned -** list. +** Link all tokens from hash table iHash into a list in sorted order. The +** tokens are not removed from the hash table. 
*/ static int fts5HashEntrySort( Fts5Hash *pHash, @@ -236157,7 +238414,7 @@ static int fts5HashEntrySort( Fts5HashEntry *pIter; for(pIter=pHash->aSlot[iSlot]; pIter; pIter=pIter->pHashNext){ if( pTerm==0 - || (pIter->nKey+1>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm)) + || (pIter->nKey>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm)) ){ Fts5HashEntry *pEntry = pIter; pEntry->pScanNext = 0; @@ -236196,12 +238453,11 @@ static int sqlite3Fts5HashQuery( for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){ zKey = fts5EntryKey(p); - assert( p->nKey+1==(int)strlen(zKey) ); - if( nTerm==p->nKey+1 && memcmp(zKey, pTerm, nTerm)==0 ) break; + if( nTerm==p->nKey && memcmp(zKey, pTerm, nTerm)==0 ) break; } if( p ){ - int nHashPre = sizeof(Fts5HashEntry) + nTerm + 1; + int nHashPre = sizeof(Fts5HashEntry) + nTerm; int nList = p->nData - nHashPre; u8 *pRet = (u8*)(*ppOut = sqlite3_malloc64(nPre + nList + 10)); if( pRet ){ @@ -236262,19 +238518,22 @@ static int sqlite3Fts5HashScanEof(Fts5Hash *p){ static void sqlite3Fts5HashScanEntry( Fts5Hash *pHash, const char **pzTerm, /* OUT: term (nul-terminated) */ + int *pnTerm, /* OUT: Size of term in bytes */ const u8 **ppDoclist, /* OUT: pointer to doclist */ int *pnDoclist /* OUT: size of doclist in bytes */ ){ Fts5HashEntry *p; if( (p = pHash->pScan) ){ char *zKey = fts5EntryKey(p); - int nTerm = (int)strlen(zKey); + int nTerm = p->nKey; fts5HashAddPoslistSize(pHash, p, 0); *pzTerm = zKey; - *ppDoclist = (const u8*)&zKey[nTerm+1]; - *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm + 1); + *pnTerm = nTerm; + *ppDoclist = (const u8*)&zKey[nTerm]; + *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm); }else{ *pzTerm = 0; + *pnTerm = 0; *ppDoclist = 0; *pnDoclist = 0; } @@ -236605,6 +238864,9 @@ typedef struct Fts5SegWriter Fts5SegWriter; typedef struct Fts5Structure Fts5Structure; typedef struct Fts5StructureLevel Fts5StructureLevel; typedef struct Fts5StructureSegment Fts5StructureSegment; +typedef struct Fts5TokenDataIter Fts5TokenDataIter; +typedef struct Fts5TokenDataMap Fts5TokenDataMap; +typedef struct Fts5TombstoneArray Fts5TombstoneArray; struct Fts5Data { u8 *p; /* Pointer to buffer containing record */ @@ -236639,6 +238901,7 @@ struct Fts5Index { /* Error state. */ int rc; /* Current error code */ + int flushRc; /* State used by the fts5DataXXX() functions. */ sqlite3_blob *pReader; /* RO incr-blob open on %_data table */ @@ -236647,6 +238910,7 @@ struct Fts5Index { sqlite3_stmt *pIdxWriter; /* "INSERT ... %_idx VALUES(?,?,?,?)" */ sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=?" */ sqlite3_stmt *pIdxSelect; + sqlite3_stmt *pIdxNextSelect; int nRead; /* Total number of blocks read */ sqlite3_stmt *pDeleteFromIdx; @@ -236800,8 +239064,7 @@ struct Fts5SegIter { Fts5Data *pLeaf; /* Current leaf data */ Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */ i64 iLeafOffset; /* Byte offset within current leaf */ - Fts5Data **apTombstone; /* Array of tombstone pages */ - int nTombstone; + Fts5TombstoneArray *pTombArray; /* Array of tombstone pages */ /* Next method */ void (*xNext)(Fts5Index*, Fts5SegIter*, int*); @@ -236828,6 +239091,15 @@ struct Fts5SegIter { u8 bDel; /* True if the delete flag is set */ }; +/* +** Array of tombstone pages. Reference counted. +*/ +struct Fts5TombstoneArray { + int nRef; /* Number of pointers to this object */ + int nTombstone; + Fts5Data *apTombstone[1]; /* Array of tombstone pages */ +}; + /* ** Argument is a pointer to an Fts5Data structure that contains a ** leaf page. 
@@ -236872,9 +239144,16 @@ struct Fts5SegIter { ** poslist: ** Used by sqlite3Fts5IterPoslist() when the poslist needs to be buffered. ** There is no way to tell if this is populated or not. +** +** pColset: +** If not NULL, points to an object containing a set of column indices. +** Only matches that occur in one of these columns will be returned. +** The Fts5Iter does not own the Fts5Colset object, and so it is not +** freed when the iterator is closed - it is owned by the upper layer. */ struct Fts5Iter { Fts5IndexIter base; /* Base class containing output vars */ + Fts5TokenDataIter *pTokenDataIter; Fts5Index *pIndex; /* Index that owns this iterator */ Fts5Buffer poslist; /* Buffer containing current poslist */ @@ -236892,7 +239171,6 @@ struct Fts5Iter { Fts5SegIter aSeg[1]; /* Array of segment iterators */ }; - /* ** An instance of the following type is used to iterate through the contents ** of a doclist-index record. @@ -237810,9 +240088,9 @@ static int fts5DlidxLvlNext(Fts5DlidxLvl *pLvl){ } if( iOffnn ){ - i64 iVal; + u64 iVal; pLvl->iLeafPgno += (iOff - pLvl->iOff) + 1; - iOff += fts5GetVarint(&pData->p[iOff], (u64*)&iVal); + iOff += fts5GetVarint(&pData->p[iOff], &iVal); pLvl->iRowid += iVal; pLvl->iOff = iOff; }else{ @@ -238191,18 +240469,20 @@ static void fts5SegIterSetNext(Fts5Index *p, Fts5SegIter *pIter){ } /* -** Allocate a tombstone hash page array (pIter->apTombstone) for the -** iterator passed as the second argument. If an OOM error occurs, leave -** an error in the Fts5Index object. +** Allocate a tombstone hash page array object (pIter->pTombArray) for +** the iterator passed as the second argument. If an OOM error occurs, +** leave an error in the Fts5Index object. */ static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){ const int nTomb = pIter->pSeg->nPgTombstone; if( nTomb>0 ){ - Fts5Data **apTomb = 0; - apTomb = (Fts5Data**)sqlite3Fts5MallocZero(&p->rc, sizeof(Fts5Data)*nTomb); - if( apTomb ){ - pIter->apTombstone = apTomb; - pIter->nTombstone = nTomb; + int nByte = nTomb * sizeof(Fts5Data*) + sizeof(Fts5TombstoneArray); + Fts5TombstoneArray *pNew; + pNew = (Fts5TombstoneArray*)sqlite3Fts5MallocZero(&p->rc, nByte); + if( pNew ){ + pNew->nTombstone = nTomb; + pNew->nRef = 1; + pIter->pTombArray = pNew; } } } @@ -238459,15 +240739,16 @@ static void fts5SegIterNext_None( }else{ const u8 *pList = 0; const char *zTerm = 0; + int nTerm = 0; int nList; sqlite3Fts5HashScanNext(p->pHash); - sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList); + sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList); if( pList==0 ) goto next_none_eof; pIter->pLeaf->p = (u8*)pList; pIter->pLeaf->nn = nList; pIter->pLeaf->szLeaf = nList; pIter->iEndofDoclist = nList; - sqlite3Fts5BufferSet(&p->rc,&pIter->term, (int)strlen(zTerm), (u8*)zTerm); + sqlite3Fts5BufferSet(&p->rc,&pIter->term, nTerm, (u8*)zTerm); pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid); } @@ -238533,11 +240814,12 @@ static void fts5SegIterNext( }else if( pIter->pSeg==0 ){ const u8 *pList = 0; const char *zTerm = 0; + int nTerm = 0; int nList = 0; assert( (pIter->flags & FTS5_SEGITER_ONETERM) || pbNewTerm ); if( 0==(pIter->flags & FTS5_SEGITER_ONETERM) ){ sqlite3Fts5HashScanNext(p->pHash); - sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList); + sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList); } if( pList==0 ){ fts5DataRelease(pIter->pLeaf); @@ -238547,8 +240829,7 @@ static void fts5SegIterNext( pIter->pLeaf->nn = nList; pIter->pLeaf->szLeaf = 
nList; pIter->iEndofDoclist = nList+1; - sqlite3Fts5BufferSet(&p->rc, &pIter->term, (int)strlen(zTerm), - (u8*)zTerm); + sqlite3Fts5BufferSet(&p->rc, &pIter->term, nTerm, (u8*)zTerm); pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid); *pbNewTerm = 1; } @@ -238934,7 +241215,7 @@ static void fts5SegIterSeekInit( fts5LeafSeek(p, bGe, pIter, pTerm, nTerm); } - if( p->rc==SQLITE_OK && bGe==0 ){ + if( p->rc==SQLITE_OK && (bGe==0 || (flags & FTS5INDEX_QUERY_SCANONETERM)) ){ pIter->flags |= FTS5_SEGITER_ONETERM; if( pIter->pLeaf ){ if( flags & FTS5INDEX_QUERY_DESC ){ @@ -238950,7 +241231,9 @@ static void fts5SegIterSeekInit( } fts5SegIterSetNext(p, pIter); - fts5SegIterAllocTombstone(p, pIter); + if( 0==(flags & FTS5INDEX_QUERY_SCANONETERM) ){ + fts5SegIterAllocTombstone(p, pIter); + } /* Either: ** @@ -238967,6 +241250,79 @@ static void fts5SegIterSeekInit( ); } + +/* +** SQL used by fts5SegIterNextInit() to find the page to open. +*/ +static sqlite3_stmt *fts5IdxNextStmt(Fts5Index *p){ + if( p->pIdxNextSelect==0 ){ + Fts5Config *pConfig = p->pConfig; + fts5IndexPrepareStmt(p, &p->pIdxNextSelect, sqlite3_mprintf( + "SELECT pgno FROM '%q'.'%q_idx' WHERE " + "segid=? AND term>? ORDER BY term ASC LIMIT 1", + pConfig->zDb, pConfig->zName + )); + + } + return p->pIdxNextSelect; +} + +/* +** This is similar to fts5SegIterSeekInit(), except that it initializes +** the segment iterator to point to the first term following the page +** with pToken/nToken on it. +*/ +static void fts5SegIterNextInit( + Fts5Index *p, + const char *pTerm, int nTerm, + Fts5StructureSegment *pSeg, /* Description of segment */ + Fts5SegIter *pIter /* Object to populate */ +){ + int iPg = -1; /* Page of segment to open */ + int bDlidx = 0; + sqlite3_stmt *pSel = 0; /* SELECT to find iPg */ + + pSel = fts5IdxNextStmt(p); + if( pSel ){ + assert( p->rc==SQLITE_OK ); + sqlite3_bind_int(pSel, 1, pSeg->iSegid); + sqlite3_bind_blob(pSel, 2, pTerm, nTerm, SQLITE_STATIC); + + if( sqlite3_step(pSel)==SQLITE_ROW ){ + i64 val = sqlite3_column_int64(pSel, 0); + iPg = (int)(val>>1); + bDlidx = (val & 0x0001); + } + p->rc = sqlite3_reset(pSel); + sqlite3_bind_null(pSel, 2); + if( p->rc ) return; + } + + memset(pIter, 0, sizeof(*pIter)); + pIter->pSeg = pSeg; + pIter->flags |= FTS5_SEGITER_ONETERM; + if( iPg>=0 ){ + pIter->iLeafPgno = iPg - 1; + fts5SegIterNextPage(p, pIter); + fts5SegIterSetNext(p, pIter); + } + if( pIter->pLeaf ){ + const u8 *a = pIter->pLeaf->p; + int iTermOff = 0; + + pIter->iPgidxOff = pIter->pLeaf->szLeaf; + pIter->iPgidxOff += fts5GetVarint32(&a[pIter->iPgidxOff], iTermOff); + pIter->iLeafOffset = iTermOff; + fts5SegIterLoadTerm(p, pIter, 0); + fts5SegIterLoadNPos(p, pIter); + if( bDlidx ) fts5SegIterLoadDlidx(p, pIter); + + assert( p->rc!=SQLITE_OK || + fts5BufferCompareBlob(&pIter->term, (const u8*)pTerm, nTerm)>0 + ); + } +} + /* ** Initialize the object pIter to point to term pTerm/nTerm within the ** in-memory hash table. If there is no such term in the hash-table, the @@ -238993,14 +241349,21 @@ static void fts5SegIterHashInit( const u8 *pList = 0; p->rc = sqlite3Fts5HashScanInit(p->pHash, (const char*)pTerm, nTerm); - sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &pList, &nList); - n = (z ? 
(int)strlen((const char*)z) : 0); + sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &n, &pList, &nList); if( pList ){ pLeaf = fts5IdxMalloc(p, sizeof(Fts5Data)); if( pLeaf ){ pLeaf->p = (u8*)pList; } } + + /* The call to sqlite3Fts5HashScanInit() causes the hash table to + ** fill the size field of all existing position lists. This means they + ** can no longer be appended to. Since the only scenario in which they + ** can be appended to is if the previous operation on this table was + ** a DELETE, by clearing the Fts5Index.bDelete flag we can avoid this + ** possibility altogether. */ + p->bDelete = 0; }else{ p->rc = sqlite3Fts5HashQuery(p->pHash, sizeof(Fts5Data), (const char*)pTerm, nTerm, (void**)&pLeaf, &nList @@ -239045,6 +241408,23 @@ static void fts5IndexFreeArray(Fts5Data **ap, int n){ } } +/* +** Decrement the ref-count of the object passed as the only argument. If it +** reaches 0, free it and its contents. +*/ +static void fts5TombstoneArrayDelete(Fts5TombstoneArray *p){ + if( p ){ + p->nRef--; + if( p->nRef<=0 ){ + int ii; + for(ii=0; iinTombstone; ii++){ + fts5DataRelease(p->apTombstone[ii]); + } + sqlite3_free(p); + } + } +} + /* ** Zero the iterator passed as the only argument. */ @@ -239052,7 +241432,7 @@ static void fts5SegIterClear(Fts5SegIter *pIter){ fts5BufferFree(&pIter->term); fts5DataRelease(pIter->pLeaf); fts5DataRelease(pIter->pNextLeaf); - fts5IndexFreeArray(pIter->apTombstone, pIter->nTombstone); + fts5TombstoneArrayDelete(pIter->pTombArray); fts5DlidxIterFree(pIter->pDlidx); sqlite3_free(pIter->aRowidOffset); memset(pIter, 0, sizeof(Fts5SegIter)); @@ -239297,7 +241677,6 @@ static void fts5SegIterNextFrom( }while( p->rc==SQLITE_OK ); } - /* ** Free the iterator object passed as the second argument. */ @@ -239442,24 +241821,25 @@ static int fts5IndexTombstoneQuery( static int fts5MultiIterIsDeleted(Fts5Iter *pIter){ int iFirst = pIter->aFirst[1].iFirst; Fts5SegIter *pSeg = &pIter->aSeg[iFirst]; + Fts5TombstoneArray *pArray = pSeg->pTombArray; - if( pSeg->pLeaf && pSeg->nTombstone ){ + if( pSeg->pLeaf && pArray ){ /* Figure out which page the rowid might be present on. */ - int iPg = ((u64)pSeg->iRowid) % pSeg->nTombstone; + int iPg = ((u64)pSeg->iRowid) % pArray->nTombstone; assert( iPg>=0 ); /* If tombstone hash page iPg has not yet been loaded from the ** database, load it now. */ - if( pSeg->apTombstone[iPg]==0 ){ - pSeg->apTombstone[iPg] = fts5DataRead(pIter->pIndex, + if( pArray->apTombstone[iPg]==0 ){ + pArray->apTombstone[iPg] = fts5DataRead(pIter->pIndex, FTS5_TOMBSTONE_ROWID(pSeg->pSeg->iSegid, iPg) ); - if( pSeg->apTombstone[iPg]==0 ) return 0; + if( pArray->apTombstone[iPg]==0 ) return 0; } return fts5IndexTombstoneQuery( - pSeg->apTombstone[iPg], - pSeg->nTombstone, + pArray->apTombstone[iPg], + pArray->nTombstone, pSeg->iRowid ); } @@ -239998,6 +242378,32 @@ static void fts5IterSetOutputCb(int *pRc, Fts5Iter *pIter){ } } +/* +** All the component segment-iterators of pIter have been set up. This +** functions finishes setup for iterator pIter itself. 
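+**
+** (Note, not part of the upstream patch: "finishing setup" means
+** populating the aFirst[] comparison tree, advancing any segment
+** iterators that point at duplicate entries, and skipping over an
+** initial empty or deleted entry. It is factored out of
+** fts5MultiIterNew() so that the tokendata=1 code added further down
+** can reuse it for iterators that it assembles by hand.)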
+*/ +static void fts5MultiIterFinishSetup(Fts5Index *p, Fts5Iter *pIter){ + int iIter; + for(iIter=pIter->nSeg-1; iIter>0; iIter--){ + int iEq; + if( (iEq = fts5MultiIterDoCompare(pIter, iIter)) ){ + Fts5SegIter *pSeg = &pIter->aSeg[iEq]; + if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0); + fts5MultiIterAdvanced(p, pIter, iEq, iIter); + } + } + fts5MultiIterSetEof(pIter); + fts5AssertMultiIterSetup(p, pIter); + + if( (pIter->bSkipEmpty && fts5MultiIterIsEmpty(p, pIter)) + || fts5MultiIterIsDeleted(pIter) + ){ + fts5MultiIterNext(p, pIter, 0, 0); + }else if( pIter->base.bEof==0 ){ + Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst]; + pIter->xSetOutputs(pIter, pSeg); + } +} /* ** Allocate a new Fts5Iter object. @@ -240079,31 +242485,12 @@ static void fts5MultiIterNew( assert( iIter==nSeg ); } - /* If the above was successful, each component iterators now points + /* If the above was successful, each component iterator now points ** to the first entry in its segment. In this case initialize the ** aFirst[] array. Or, if an error has occurred, free the iterator ** object and set the output variable to NULL. */ if( p->rc==SQLITE_OK ){ - for(iIter=pNew->nSeg-1; iIter>0; iIter--){ - int iEq; - if( (iEq = fts5MultiIterDoCompare(pNew, iIter)) ){ - Fts5SegIter *pSeg = &pNew->aSeg[iEq]; - if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0); - fts5MultiIterAdvanced(p, pNew, iEq, iIter); - } - } - fts5MultiIterSetEof(pNew); - fts5AssertMultiIterSetup(p, pNew); - - if( (pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew)) - || fts5MultiIterIsDeleted(pNew) - ){ - fts5MultiIterNext(p, pNew, 0, 0); - }else if( pNew->base.bEof==0 ){ - Fts5SegIter *pSeg = &pNew->aSeg[pNew->aFirst[1].iFirst]; - pNew->xSetOutputs(pNew, pSeg); - } - + fts5MultiIterFinishSetup(p, pNew); }else{ fts5MultiIterFree(pNew); *ppOut = 0; @@ -240128,7 +242515,6 @@ static void fts5MultiIterNew2( pNew = fts5MultiIterAlloc(p, 2); if( pNew ){ Fts5SegIter *pIter = &pNew->aSeg[1]; - pIter->flags = FTS5_SEGITER_ONETERM; if( pData->szLeaf>0 ){ pIter->pLeaf = pData; @@ -240276,6 +242662,7 @@ static void fts5IndexDiscardData(Fts5Index *p){ sqlite3Fts5HashClear(p->pHash); p->nPendingData = 0; p->nPendingRow = 0; + p->flushRc = SQLITE_OK; } p->nContentlessDelete = 0; } @@ -240491,7 +242878,7 @@ static void fts5WriteDlidxAppend( } if( pDlidx->bPrevValid ){ - iVal = iRowid - pDlidx->iPrev; + iVal = (u64)iRowid - (u64)pDlidx->iPrev; }else{ i64 iPgno = (i==0 ? pWriter->writer.pgno : pDlidx[-1].pgno); assert( pDlidx->buf.n==0 ); @@ -240678,7 +243065,7 @@ static void fts5WriteAppendPoslistData( const u8 *a = aData; int n = nData; - assert( p->pConfig->pgsz>0 ); + assert( p->pConfig->pgsz>0 || p->rc!=SQLITE_OK ); while( p->rc==SQLITE_OK && (pPage->buf.n + pPage->pgidx.n + n)>=p->pConfig->pgsz ){ @@ -241411,18 +243798,24 @@ static void fts5DoSecureDelete( iOff = iStart; - /* Set variable bLastInDoclist to true if this entry happens to be - ** the last rowid in the doclist for its term. */ + /* If the position-list for the entry being removed flows over past + ** the end of this page, delete the portion of the position-list on the + ** next page and beyond. + ** + ** Set variable bLastInDoclist to true if this entry happens + ** to be the last rowid in the doclist for its term. 
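+    **
+    ** (Note on the assumed page layout, not part of the upstream patch:
+    ** a leaf page keeps doclist data in bytes [0..szLeaf) and the
+    ** page-index footer in [szLeaf..nn), so the test "iNextOff>=iPgIdx"
+    ** below means the position-list being removed overflows this page's
+    ** data area and continues on the next leaf.)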
*/ + if( iNextOff>=iPgIdx ){ + int pgno = pSeg->iLeafPgno+1; + fts5SecureDeleteOverflow(p, pSeg->pSeg, pgno, &bLastInDoclist); + iNextOff = iPgIdx; + } + if( pSeg->bDel==0 ){ - if( iNextOff>=iPgIdx ){ - int pgno = pSeg->iLeafPgno+1; - fts5SecureDeleteOverflow(p, pSeg->pSeg, pgno, &bLastInDoclist); - iNextOff = iPgIdx; - }else{ + if( iNextOff!=iPgIdx ){ /* Loop through the page-footer. If iNextOff (offset of the ** entry following the one we are removing) is equal to the ** offset of a key on this page, then the entry is the last - ** in its doclist. */ + ** in its doclist. */ int iKeyOff = 0; for(iIdx=0; iIdxrc!=SQLITE_OK ) break; @@ -241716,7 +244108,7 @@ static void fts5FlushOneHash(Fts5Index *p){ if( bSecureDelete ){ if( eDetail==FTS5_DETAIL_NONE ){ if( iOffrc!=SQLITE_OK || pDoclist[iOff]==0x01 ){ iOff++; continue; @@ -241852,6 +244244,10 @@ static void fts5FlushOneHash(Fts5Index *p){ */ static void fts5IndexFlush(Fts5Index *p){ /* Unless it is empty, flush the hash table to disk */ + if( p->flushRc ){ + p->rc = p->flushRc; + return; + } if( p->nPendingData || p->nContentlessDelete ){ assert( p->pHash ); fts5FlushOneHash(p); @@ -241860,6 +244256,8 @@ static void fts5IndexFlush(Fts5Index *p){ p->nPendingData = 0; p->nPendingRow = 0; p->nContentlessDelete = 0; + }else if( p->nPendingData || p->nContentlessDelete ){ + p->flushRc = p->rc; } } } @@ -241938,8 +244336,9 @@ static int sqlite3Fts5IndexOptimize(Fts5Index *p){ assert( p->rc==SQLITE_OK ); fts5IndexFlush(p); - assert( p->nContentlessDelete==0 ); + assert( p->rc!=SQLITE_OK || p->nContentlessDelete==0 ); pStruct = fts5StructureRead(p); + assert( p->rc!=SQLITE_OK || pStruct!=0 ); fts5StructureInvalidate(p); if( pStruct ){ @@ -242345,7 +244744,7 @@ static void fts5SetupPrefixIter( u8 *pToken, /* Buffer containing prefix to match */ int nToken, /* Size of buffer pToken in bytes */ Fts5Colset *pColset, /* Restrict matches to these columns */ - Fts5Iter **ppIter /* OUT: New iterator */ + Fts5Iter **ppIter /* OUT: New iterator */ ){ Fts5Structure *pStruct; Fts5Buffer *aBuf; @@ -242366,8 +244765,9 @@ static void fts5SetupPrefixIter( aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf); pStruct = fts5StructureRead(p); + assert( p->rc!=SQLITE_OK || (aBuf && pStruct) ); - if( aBuf && pStruct ){ + if( p->rc==SQLITE_OK ){ const int flags = FTS5INDEX_QUERY_SCAN | FTS5INDEX_QUERY_SKIPEMPTY | FTS5INDEX_QUERY_NOOUTPUT; @@ -242379,6 +244779,12 @@ static void fts5SetupPrefixIter( int bNewTerm = 1; memset(&doclist, 0, sizeof(doclist)); + + /* If iIdx is non-zero, then it is the number of a prefix-index for + ** prefixes 1 character longer than the prefix being queried for. That + ** index contains all the doclists required, except for the one + ** corresponding to the prefix itself. That one is extracted from the + ** main term index here. 
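+    **
+    ** (Worked example, not part of the upstream patch, assuming the
+    ** schema "CREATE VIRTUAL TABLE t USING fts5(x, prefix='3')": a
+    ** query for 'ab' || '*' may use the 3-character prefix index, since
+    ** the doclists for 'aba', 'abb' and so on cover every term of three
+    ** or more characters that starts with 'ab'. The only doclist
+    ** missing is the one for the 2-byte term 'ab' itself, which this
+    ** block reads from the main term index.)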
*/ if( iIdx!=0 ){ int dummy = 0; const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT; @@ -242402,6 +244808,7 @@ static void fts5SetupPrefixIter( pToken[0] = FTS5_MAIN_PREFIX + iIdx; fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); fts5IterSetOutputCb(&p->rc, p1); + for( /* no-op */ ; fts5MultiIterEof(p, p1)==0; fts5MultiIterNext2(p, p1, &bNewTerm) @@ -242417,7 +244824,6 @@ static void fts5SetupPrefixIter( } if( p1->base.nData==0 ) continue; - if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){ for(i=0; p->rc==SQLITE_OK && doclist.n; i++){ int i1 = i*nMerge; @@ -242456,7 +244862,7 @@ static void fts5SetupPrefixIter( } fts5MultiIterFree(p1); - pData = fts5IdxMalloc(p, sizeof(Fts5Data)+doclist.n+FTS5_DATA_ZERO_PADDING); + pData = fts5IdxMalloc(p, sizeof(*pData)+doclist.n+FTS5_DATA_ZERO_PADDING); if( pData ){ pData->p = (u8*)&pData[1]; pData->nn = pData->szLeaf = doclist.n; @@ -242599,6 +245005,7 @@ static int sqlite3Fts5IndexClose(Fts5Index *p){ sqlite3_finalize(p->pIdxWriter); sqlite3_finalize(p->pIdxDeleter); sqlite3_finalize(p->pIdxSelect); + sqlite3_finalize(p->pIdxNextSelect); sqlite3_finalize(p->pDataVersion); sqlite3_finalize(p->pDeleteFromIdx); sqlite3Fts5HashFree(p->pHash); @@ -242694,6 +245101,454 @@ static int sqlite3Fts5IndexWrite( return rc; } +/* +** pToken points to a buffer of size nToken bytes containing a search +** term, including the index number at the start, used on a tokendata=1 +** table. This function returns true if the term in buffer pBuf matches +** token pToken/nToken. +*/ +static int fts5IsTokendataPrefix( + Fts5Buffer *pBuf, + const u8 *pToken, + int nToken +){ + return ( + pBuf->n>=nToken + && 0==memcmp(pBuf->p, pToken, nToken) + && (pBuf->n==nToken || pBuf->p[nToken]==0x00) + ); +} + +/* +** Ensure the segment-iterator passed as the only argument points to EOF. +*/ +static void fts5SegIterSetEOF(Fts5SegIter *pSeg){ + fts5DataRelease(pSeg->pLeaf); + pSeg->pLeaf = 0; +} + +/* +** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an +** array of these for each row it visits. Or, for an iterator used by an +** "ORDER BY rank" query, it accumulates an array of these for the entire +** query. +** +** Each instance in the array indicates the iterator (and therefore term) +** associated with position iPos of rowid iRowid. This is used by the +** xInstToken() API. +*/ +struct Fts5TokenDataMap { + i64 iRowid; /* Row this token is located in */ + i64 iPos; /* Position of token */ + int iIter; /* Iterator token was read from */ +}; + +/* +** An object used to supplement Fts5Iter for tokendata=1 iterators. +*/ +struct Fts5TokenDataIter { + int nIter; + int nIterAlloc; + + int nMap; + int nMapAlloc; + Fts5TokenDataMap *aMap; + + Fts5PoslistReader *aPoslistReader; + int *aPoslistToIter; + Fts5Iter *apIter[1]; +}; + +/* +** This function appends iterator pAppend to Fts5TokenDataIter pIn and +** returns the result. +*/ +static Fts5TokenDataIter *fts5AppendTokendataIter( + Fts5Index *p, /* Index object (for error code) */ + Fts5TokenDataIter *pIn, /* Current Fts5TokenDataIter struct */ + Fts5Iter *pAppend /* Append this iterator */ +){ + Fts5TokenDataIter *pRet = pIn; + + if( p->rc==SQLITE_OK ){ + if( pIn==0 || pIn->nIter==pIn->nIterAlloc ){ + int nAlloc = pIn ? 
pIn->nIterAlloc*2 : 16; + int nByte = nAlloc * sizeof(Fts5Iter*) + sizeof(Fts5TokenDataIter); + Fts5TokenDataIter *pNew = (Fts5TokenDataIter*)sqlite3_realloc(pIn, nByte); + + if( pNew==0 ){ + p->rc = SQLITE_NOMEM; + }else{ + if( pIn==0 ) memset(pNew, 0, nByte); + pRet = pNew; + pNew->nIterAlloc = nAlloc; + } + } + } + if( p->rc ){ + sqlite3Fts5IterClose((Fts5IndexIter*)pAppend); + }else{ + pRet->apIter[pRet->nIter++] = pAppend; + } + assert( pRet==0 || pRet->nIter<=pRet->nIterAlloc ); + + return pRet; +} + +/* +** Delete an Fts5TokenDataIter structure and its contents. +*/ +static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){ + if( pSet ){ + int ii; + for(ii=0; iinIter; ii++){ + fts5MultiIterFree(pSet->apIter[ii]); + } + sqlite3_free(pSet->aPoslistReader); + sqlite3_free(pSet->aMap); + sqlite3_free(pSet); + } +} + +/* +** Append a mapping to the token-map belonging to object pT. +*/ +static void fts5TokendataIterAppendMap( + Fts5Index *p, + Fts5TokenDataIter *pT, + int iIter, + i64 iRowid, + i64 iPos +){ + if( p->rc==SQLITE_OK ){ + if( pT->nMap==pT->nMapAlloc ){ + int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64; + int nByte = nNew * sizeof(Fts5TokenDataMap); + Fts5TokenDataMap *aNew; + + aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nByte); + if( aNew==0 ){ + p->rc = SQLITE_NOMEM; + return; + } + + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pT->aMap[pT->nMap].iRowid = iRowid; + pT->aMap[pT->nMap].iPos = iPos; + pT->aMap[pT->nMap].iIter = iIter; + pT->nMap++; + } +} + +/* +** The iterator passed as the only argument must be a tokendata=1 iterator +** (pIter->pTokenDataIter!=0). This function sets the iterator output +** variables (pIter->base.*) according to the contents of the current +** row. +*/ +static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){ + int ii; + int nHit = 0; + i64 iRowid = SMALLEST_INT64; + int iMin = 0; + + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + + pIter->base.nData = 0; + pIter->base.pData = 0; + + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( p->base.bEof==0 ){ + if( nHit==0 || p->base.iRowidbase.iRowid; + nHit = 1; + pIter->base.pData = p->base.pData; + pIter->base.nData = p->base.nData; + iMin = ii; + }else if( p->base.iRowid==iRowid ){ + nHit++; + } + } + } + + if( nHit==0 ){ + pIter->base.bEof = 1; + }else{ + int eDetail = pIter->pIndex->pConfig->eDetail; + pIter->base.bEof = 0; + pIter->base.iRowid = iRowid; + + if( nHit==1 && eDetail==FTS5_DETAIL_FULL ){ + fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, iRowid, -1); + }else + if( nHit>1 && eDetail!=FTS5_DETAIL_NONE ){ + int nReader = 0; + int nByte = 0; + i64 iPrev = 0; + + /* Allocate array of iterators if they are not already allocated. 
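+    **
+    ** (Worked example, not part of the upstream patch: this block and
+    ** the loop that follows perform a k-way merge of the matching
+    ** iterators' position lists. If rowid R matches term
+    ** 'one' || 0x00 || 'a' at positions 1,3,7 and 'one' || 0x00 || 'b'
+    ** at positions 2,5, the merged output poslist is 1,2,3,5,7, and in
+    ** detail=full mode each emitted position is also recorded in the
+    ** aMap[] token-mapping for later xInstToken() lookups.)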
*/ + if( pT->aPoslistReader==0 ){ + pT->aPoslistReader = (Fts5PoslistReader*)sqlite3Fts5MallocZero( + &pIter->pIndex->rc, + pT->nIter * (sizeof(Fts5PoslistReader) + sizeof(int)) + ); + if( pT->aPoslistReader==0 ) return; + pT->aPoslistToIter = (int*)&pT->aPoslistReader[pT->nIter]; + } + + /* Populate an iterator for each poslist that will be merged */ + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( iRowid==p->base.iRowid ){ + pT->aPoslistToIter[nReader] = ii; + sqlite3Fts5PoslistReaderInit( + p->base.pData, p->base.nData, &pT->aPoslistReader[nReader++] + ); + nByte += p->base.nData; + } + } + + /* Ensure the output buffer is large enough */ + if( fts5BufferGrow(&pIter->pIndex->rc, &pIter->poslist, nByte+nHit*10) ){ + return; + } + + /* Ensure the token-mapping is large enough */ + if( eDetail==FTS5_DETAIL_FULL && pT->nMapAlloc<(pT->nMap + nByte) ){ + int nNew = (pT->nMapAlloc + nByte) * 2; + Fts5TokenDataMap *aNew = (Fts5TokenDataMap*)sqlite3_realloc( + pT->aMap, nNew*sizeof(Fts5TokenDataMap) + ); + if( aNew==0 ){ + pIter->pIndex->rc = SQLITE_NOMEM; + return; + } + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pIter->poslist.n = 0; + + while( 1 ){ + i64 iMinPos = LARGEST_INT64; + + /* Find smallest position */ + iMin = 0; + for(ii=0; iiaPoslistReader[ii]; + if( pReader->bEof==0 ){ + if( pReader->iPosiPos; + iMin = ii; + } + } + } + + /* If all readers were at EOF, break out of the loop. */ + if( iMinPos==LARGEST_INT64 ) break; + + sqlite3Fts5PoslistSafeAppend(&pIter->poslist, &iPrev, iMinPos); + sqlite3Fts5PoslistReaderNext(&pT->aPoslistReader[iMin]); + + if( eDetail==FTS5_DETAIL_FULL ){ + pT->aMap[pT->nMap].iPos = iMinPos; + pT->aMap[pT->nMap].iIter = pT->aPoslistToIter[iMin]; + pT->aMap[pT->nMap].iRowid = iRowid; + pT->nMap++; + } + } + + pIter->base.pData = pIter->poslist.p; + pIter->base.nData = pIter->poslist.n; + } + } +} + +/* +** The iterator passed as the only argument must be a tokendata=1 iterator +** (pIter->pTokenDataIter!=0). This function advances the iterator. If +** argument bFrom is false, then the iterator is advanced to the next +** entry. Or, if bFrom is true, it is advanced to the first entry with +** a rowid of iFrom or greater. +*/ +static void fts5TokendataIterNext(Fts5Iter *pIter, int bFrom, i64 iFrom){ + int ii; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( p->base.bEof==0 + && (p->base.iRowid==pIter->base.iRowid || (bFrom && p->base.iRowidpIndex, p, bFrom, iFrom); + while( bFrom && p->base.bEof==0 + && p->base.iRowidpIndex->rc==SQLITE_OK + ){ + fts5MultiIterNext(p->pIndex, p, 0, 0); + } + } + } + + fts5IterSetOutputsTokendata(pIter); +} + +/* +** If the segment-iterator passed as the first argument is at EOF, then +** set pIter->term to a copy of buffer pTerm. +*/ +static void fts5TokendataSetTermIfEof(Fts5Iter *pIter, Fts5Buffer *pTerm){ + if( pIter && pIter->aSeg[0].pLeaf==0 ){ + fts5BufferSet(&pIter->pIndex->rc, &pIter->aSeg[0].term, pTerm->n, pTerm->p); + } +} + +/* +** This function sets up an iterator to use for a non-prefix query on a +** tokendata=1 table. 
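+**
+** (Note, not part of the upstream patch: on a tokendata=1 table each
+** index term is stored as "token" || 0x00 || data, so a single query
+** token can match several distinct index terms. This routine therefore
+** collects one Fts5Iter for each matching term - for a query of 'one'
+** it might collect iterators for 'one' || 0x00 || 'a' and
+** 'one' || 0x00 || 'b' - and their results are merged at read time.)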
+*/ +static Fts5Iter *fts5SetupTokendataIter( + Fts5Index *p, /* FTS index to query */ + const u8 *pToken, /* Buffer containing query term */ + int nToken, /* Size of buffer pToken in bytes */ + Fts5Colset *pColset /* Colset to filter on */ +){ + Fts5Iter *pRet = 0; + Fts5TokenDataIter *pSet = 0; + Fts5Structure *pStruct = 0; + const int flags = FTS5INDEX_QUERY_SCANONETERM | FTS5INDEX_QUERY_SCAN; + + Fts5Buffer bSeek = {0, 0, 0}; + Fts5Buffer *pSmall = 0; + + fts5IndexFlush(p); + pStruct = fts5StructureRead(p); + + while( p->rc==SQLITE_OK ){ + Fts5Iter *pPrev = pSet ? pSet->apIter[pSet->nIter-1] : 0; + Fts5Iter *pNew = 0; + Fts5SegIter *pNewIter = 0; + Fts5SegIter *pPrevIter = 0; + + int iLvl, iSeg, ii; + + pNew = fts5MultiIterAlloc(p, pStruct->nSegment); + if( pSmall ){ + fts5BufferSet(&p->rc, &bSeek, pSmall->n, pSmall->p); + fts5BufferAppendBlob(&p->rc, &bSeek, 1, (const u8*)"\0"); + }else{ + fts5BufferSet(&p->rc, &bSeek, nToken, pToken); + } + if( p->rc ){ + sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + break; + } + + pNewIter = &pNew->aSeg[0]; + pPrevIter = (pPrev ? &pPrev->aSeg[0] : 0); + for(iLvl=0; iLvlnLevel; iLvl++){ + for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){ + Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg]; + int bDone = 0; + + if( pPrevIter ){ + if( fts5BufferCompare(pSmall, &pPrevIter->term) ){ + memcpy(pNewIter, pPrevIter, sizeof(Fts5SegIter)); + memset(pPrevIter, 0, sizeof(Fts5SegIter)); + bDone = 1; + }else if( pPrevIter->iEndofDoclist>pPrevIter->pLeaf->szLeaf ){ + fts5SegIterNextInit(p,(const char*)bSeek.p,bSeek.n-1,pSeg,pNewIter); + bDone = 1; + } + } + + if( bDone==0 ){ + fts5SegIterSeekInit(p, bSeek.p, bSeek.n, flags, pSeg, pNewIter); + } + + if( pPrevIter ){ + if( pPrevIter->pTombArray ){ + pNewIter->pTombArray = pPrevIter->pTombArray; + pNewIter->pTombArray->nRef++; + } + }else{ + fts5SegIterAllocTombstone(p, pNewIter); + } + + pNewIter++; + if( pPrevIter ) pPrevIter++; + if( p->rc ) break; + } + } + fts5TokendataSetTermIfEof(pPrev, pSmall); + + pNew->bSkipEmpty = 1; + pNew->pColset = pColset; + fts5IterSetOutputCb(&p->rc, pNew); + + /* Loop through all segments in the new iterator. Find the smallest + ** term that any segment-iterator points to. Iterator pNew will be + ** used for this term. Also, set any iterator that points to a term that + ** does not match pToken/nToken to point to EOF */ + pSmall = 0; + for(ii=0; iinSeg; ii++){ + Fts5SegIter *pII = &pNew->aSeg[ii]; + if( 0==fts5IsTokendataPrefix(&pII->term, pToken, nToken) ){ + fts5SegIterSetEOF(pII); + } + if( pII->pLeaf && (!pSmall || fts5BufferCompare(pSmall, &pII->term)>0) ){ + pSmall = &pII->term; + } + } + + /* If pSmall is still NULL at this point, then the new iterator does + ** not point to any terms that match the query. So delete it and break + ** out of the loop - all required iterators have been collected. */ + if( pSmall==0 ){ + sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + break; + } + + /* Append this iterator to the set and continue. 
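+    **
+    ** (Note, not part of the upstream patch: each per-term iterator
+    ** built by this loop visits the same segments, so the tombstone
+    ** hash pages loaded for the first iterator are shared with later
+    ** ones by incrementing Fts5TombstoneArray.nRef above, instead of
+    ** re-reading the same %_data pages once per term.)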
*/ + pSet = fts5AppendTokendataIter(p, pSet, pNew); + } + + if( p->rc==SQLITE_OK && pSet ){ + int ii; + for(ii=0; iinIter; ii++){ + Fts5Iter *pIter = pSet->apIter[ii]; + int iSeg; + for(iSeg=0; iSegnSeg; iSeg++){ + pIter->aSeg[iSeg].flags |= FTS5_SEGITER_ONETERM; + } + fts5MultiIterFinishSetup(p, pIter); + } + } + + if( p->rc==SQLITE_OK ){ + pRet = fts5MultiIterAlloc(p, 0); + } + if( pRet ){ + pRet->pTokenDataIter = pSet; + if( pSet ){ + fts5IterSetOutputsTokendata(pRet); + }else{ + pRet->base.bEof = 1; + } + }else{ + fts5TokendataIterDelete(pSet); + } + + fts5StructureRelease(pStruct); + fts5BufferFree(&bSeek); + return pRet; +} + + /* ** Open a new iterator to iterate though all rowid that match the ** specified token or token prefix. @@ -242715,8 +245570,13 @@ static int sqlite3Fts5IndexQuery( if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){ int iIdx = 0; /* Index to search */ int iPrefixIdx = 0; /* +1 prefix index */ + int bTokendata = pConfig->bTokendata; if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken); + if( flags & (FTS5INDEX_QUERY_NOTOKENDATA|FTS5INDEX_QUERY_SCAN) ){ + bTokendata = 0; + } + /* Figure out which index to search and set iIdx accordingly. If this ** is a prefix query for which there is no prefix index, set iIdx to ** greater than pConfig->nPrefix to indicate that the query will be @@ -242742,7 +245602,10 @@ static int sqlite3Fts5IndexQuery( } } - if( iIdx<=pConfig->nPrefix ){ + if( bTokendata && iIdx==0 ){ + buf.p[0] = '0'; + pRet = fts5SetupTokendataIter(p, buf.p, nToken+1, pColset); + }else if( iIdx<=pConfig->nPrefix ){ /* Straight index lookup */ Fts5Structure *pStruct = fts5StructureRead(p); buf.p[0] = (u8)(FTS5_MAIN_PREFIX + iIdx); @@ -242789,7 +245652,11 @@ static int sqlite3Fts5IndexQuery( static int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; assert( pIter->pIndex->rc==SQLITE_OK ); - fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); + if( pIter->pTokenDataIter ){ + fts5TokendataIterNext(pIter, 0, 0); + }else{ + fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); + } return fts5IndexReturn(pIter->pIndex); } @@ -242822,7 +245689,11 @@ static int sqlite3Fts5IterNextScan(Fts5IndexIter *pIndexIter){ */ static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); + if( pIter->pTokenDataIter ){ + fts5TokendataIterNext(pIter, 1, iMatch); + }else{ + fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); + } return fts5IndexReturn(pIter->pIndex); } @@ -242837,6 +245708,99 @@ static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){ return (z ? &z[1] : 0); } +/* +** This is used by xInstToken() to access the token at offset iOff, column +** iCol of row iRowid. The token is returned via output variables *ppOut +** and *pnOut. The iterator passed as the first argument must be a tokendata=1 +** iterator (pIter->pTokenDataIter!=0). 
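+**
+** (Note, not part of the upstream patch: iPos below is the usual FTS5
+** packed position, iPos = (iCol << 32) + iOff, so a map sorted on
+** (iRowid, iPos) is ordered by rowid, then column, then token offset,
+** and the loop that follows is a plain binary search over that
+** ordering.)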
+*/ +static int sqlite3Fts5IterToken( + Fts5IndexIter *pIndexIter, + i64 iRowid, + int iCol, + int iOff, + const char **ppOut, int *pnOut +){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + Fts5TokenDataMap *aMap = pT->aMap; + i64 iPos = (((i64)iCol)<<32) + iOff; + + int i1 = 0; + int i2 = pT->nMap; + int iTest = 0; + + while( i2>i1 ){ + iTest = (i1 + i2) / 2; + + if( aMap[iTest].iRowidiRowid ){ + i2 = iTest; + }else{ + if( aMap[iTest].iPosiPos ){ + i2 = iTest; + }else{ + break; + } + } + } + + if( i2>i1 ){ + Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter]; + *ppOut = (const char*)pMap->aSeg[0].term.p+1; + *pnOut = pMap->aSeg[0].term.n-1; + } + + return SQLITE_OK; +} + +/* +** Clear any existing entries from the token-map associated with the +** iterator passed as the only argument. +*/ +static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter *pIndexIter){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + if( pIter && pIter->pTokenDataIter ){ + pIter->pTokenDataIter->nMap = 0; + } +} + +/* +** Set a token-mapping for the iterator passed as the first argument. This +** is used in detail=column or detail=none mode when a token is requested +** using the xInstToken() API. In this case the caller tokenizers the +** current row and configures the token-mapping via multiple calls to this +** function. +*/ +static int sqlite3Fts5IndexIterWriteTokendata( + Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, + i64 iRowid, int iCol, int iOff +){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + Fts5Index *p = pIter->pIndex; + int ii; + + assert( p->pConfig->eDetail!=FTS5_DETAIL_FULL ); + assert( pIter->pTokenDataIter ); + + for(ii=0; iinIter; ii++){ + Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term; + if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break; + } + if( iinIter ){ + fts5TokendataIterAppendMap(p, pT, ii, iRowid, (((i64)iCol)<<32) + iOff); + } + return fts5IndexReturn(p); +} + /* ** Close an iterator opened by an earlier call to sqlite3Fts5IndexQuery(). 
*/ @@ -242844,6 +245808,7 @@ static void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){ if( pIndexIter ){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; Fts5Index *pIndex = pIter->pIndex; + fts5TokendataIterDelete(pIter->pTokenDataIter); fts5MultiIterFree(pIter); sqlite3Fts5IndexCloseReader(pIndex); } @@ -243351,7 +246316,9 @@ static int fts5QueryCksum( int eDetail = p->pConfig->eDetail; u64 cksum = *pCksum; Fts5IndexIter *pIter = 0; - int rc = sqlite3Fts5IndexQuery(p, z, n, flags, 0, &pIter); + int rc = sqlite3Fts5IndexQuery( + p, z, n, (flags | FTS5INDEX_QUERY_NOTOKENDATA), 0, &pIter + ); while( rc==SQLITE_OK && ALWAYS(pIter!=0) && 0==sqlite3Fts5IterEof(pIter) ){ i64 rowid = pIter->iRowid; @@ -243518,7 +246485,7 @@ static void fts5IndexIntegrityCheckEmpty( } static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ - int iTermOff = 0; + i64 iTermOff = 0; int ii; Fts5Buffer buf1 = {0,0,0}; @@ -243527,7 +246494,7 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ ii = pLeaf->szLeaf; while( iinn && p->rc==SQLITE_OK ){ int res; - int iOff; + i64 iOff; int nIncr; ii += fts5GetVarint32(&pLeaf->p[ii], nIncr); @@ -244049,6 +247016,24 @@ static void fts5DecodeRowidList( } #endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) +static void fts5BufferAppendTerm(int *pRc, Fts5Buffer *pBuf, Fts5Buffer *pTerm){ + int ii; + fts5BufferGrow(pRc, pBuf, pTerm->n*2 + 1); + if( *pRc==SQLITE_OK ){ + for(ii=0; iin; ii++){ + if( pTerm->p[ii]==0x00 ){ + pBuf->p[pBuf->n++] = '\\'; + pBuf->p[pBuf->n++] = '0'; + }else{ + pBuf->p[pBuf->n++] = pTerm->p[ii]; + } + } + pBuf->p[pBuf->n] = 0x00; + } +} +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ + #if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** The implementation of user-defined scalar function fts5_decode(). @@ -244156,9 +247141,8 @@ static void fts5DecodeFunction( iOff += fts5GetVarint32(&a[iOff], nAppend); term.n = nKeep; fts5BufferAppendBlob(&rc, &term, nAppend, &a[iOff]); - sqlite3Fts5BufferAppendPrintf( - &rc, &s, " term=%.*s", term.n, (const char*)term.p - ); + sqlite3Fts5BufferAppendPrintf(&rc, &s, " term="); + fts5BufferAppendTerm(&rc, &s, &term); iOff += nAppend; /* Figure out where the doclist for this term ends */ @@ -244266,9 +247250,8 @@ static void fts5DecodeFunction( fts5BufferAppendBlob(&rc, &term, nByte, &a[iOff]); iOff += nByte; - sqlite3Fts5BufferAppendPrintf( - &rc, &s, " term=%.*s", term.n, (const char*)term.p - ); + sqlite3Fts5BufferAppendPrintf(&rc, &s, " term="); + fts5BufferAppendTerm(&rc, &s, &term); iOff += fts5DecodeDoclist(&rc, &s, &a[iOff], iEnd-iOff); } @@ -244743,7 +247726,7 @@ struct Fts5FullTable { Fts5Global *pGlobal; /* Global (connection wide) data */ Fts5Cursor *pSortCsr; /* Sort data from this cursor */ int iSavepoint; /* Successful xSavepoint()+1 */ - int bInSavepoint; + #ifdef SQLITE_DEBUG struct Fts5TransactionState ts; #endif @@ -245281,12 +248264,15 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ } idxStr[iIdxStr] = '\0'; - /* Set idxFlags flags for the ORDER BY clause */ + /* Set idxFlags flags for the ORDER BY clause + ** + ** Note that tokendata=1 tables cannot currently handle "ORDER BY rowid DESC". 
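+  **
+  ** (Example, not part of the upstream patch, with a hypothetical
+  ** table "ft": a query such as
+  **
+  **     SELECT rowid FROM ft('one') ORDER BY rowid DESC;
+  **
+  ** does not receive the FTS5_BI_ORDER_ROWID flag on a tokendata=1
+  ** table, so SQLite sorts the result itself rather than asking the
+  ** cursor to iterate backwards.)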
+ */ if( pInfo->nOrderBy==1 ){ int iSort = pInfo->aOrderBy[0].iColumn; if( iSort==(pConfig->nCol+1) && bSeenMatch ){ idxFlags |= FTS5_BI_ORDER_RANK; - }else if( iSort==-1 ){ + }else if( iSort==-1 && (!pInfo->aOrderBy[0].desc || !pConfig->bTokendata) ){ idxFlags |= FTS5_BI_ORDER_ROWID; } if( BitFlagTest(idxFlags, FTS5_BI_ORDER_RANK|FTS5_BI_ORDER_ROWID) ){ @@ -245538,6 +248524,16 @@ static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){ ); assert( !CsrFlagTest(pCsr, FTS5CSR_EOF) ); + /* If this cursor uses FTS5_PLAN_MATCH and this is a tokendata=1 table, + ** clear any token mappings accumulated at the fts5_index.c level. In + ** other cases, specifically FTS5_PLAN_SOURCE and FTS5_PLAN_SORTED_MATCH, + ** we need to retain the mappings for the entire query. */ + if( pCsr->ePlan==FTS5_PLAN_MATCH + && ((Fts5Table*)pCursor->pVtab)->pConfig->bTokendata + ){ + sqlite3Fts5ExprClearTokens(pCsr->pExpr); + } + if( pCsr->ePlan<3 ){ int bSkip = 0; if( (rc = fts5CursorReseek(pCsr, &bSkip)) || bSkip ) return rc; @@ -246198,7 +249194,10 @@ static int fts5SpecialInsert( }else if( 0==sqlite3_stricmp("flush", zCmd) ){ rc = sqlite3Fts5FlushToDisk(&pTab->p); }else{ - rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + rc = sqlite3Fts5FlushToDisk(&pTab->p); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + } if( rc==SQLITE_OK ){ rc = sqlite3Fts5ConfigSetValue(pTab->p.pConfig, zCmd, pVal, &bError); } @@ -246523,7 +249522,10 @@ static int fts5ApiColumnText( ){ int rc = SQLITE_OK; Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; - if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab)) + Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); + if( iCol<0 || iCol>=pTab->pConfig->nCol ){ + rc = SQLITE_RANGE; + }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab)) || pCsr->ePlan==FTS5_PLAN_SPECIAL ){ *pz = 0; @@ -246548,8 +249550,9 @@ static int fts5CsrPoslist( int rc = SQLITE_OK; int bLive = (pCsr->pSorter==0); - if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){ - + if( iPhrase<0 || iPhrase>=sqlite3Fts5ExprPhraseCount(pCsr->pExpr) ){ + rc = SQLITE_RANGE; + }else if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){ if( pConfig->eDetail!=FTS5_DETAIL_FULL ){ Fts5PoslistPopulator *aPopulator; int i; @@ -246573,15 +249576,21 @@ static int fts5CsrPoslist( CsrFlagClear(pCsr, FTS5CSR_REQUIRE_POSLIST); } - if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){ - Fts5Sorter *pSorter = pCsr->pSorter; - int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]); - *pn = pSorter->aIdx[iPhrase] - i1; - *pa = &pSorter->aPoslist[i1]; + if( rc==SQLITE_OK ){ + if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){ + Fts5Sorter *pSorter = pCsr->pSorter; + int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]); + *pn = pSorter->aIdx[iPhrase] - i1; + *pa = &pSorter->aPoslist[i1]; + }else{ + *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa); + } }else{ - *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa); + *pa = 0; + *pn = 0; } + return rc; } @@ -246688,12 +249697,6 @@ static int fts5ApiInst( ){ if( iIdx<0 || iIdx>=pCsr->nInstCount ){ rc = SQLITE_RANGE; -#if 0 - }else if( fts5IsOffsetless((Fts5Table*)pCsr->base.pVtab) ){ - *piPhrase = pCsr->aInst[iIdx*3]; - *piCol = pCsr->aInst[iIdx*3 + 2]; - *piOff = -1; -#endif }else{ *piPhrase = pCsr->aInst[iIdx*3]; *piCol = pCsr->aInst[iIdx*3 + 1]; @@ -246948,13 +249951,56 @@ static int fts5ApiPhraseFirstColumn( return rc; } +/* +** xQueryToken() API implemenetation. 
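+**
+** (Usage sketch, not part of the upstream patch: an auxiliary function
+** reaches this through the v3 extension API, e.g.
+**
+**     const char *z = 0; int n = 0;
+**     int rc = pApi->xQueryToken(pFts, 0, 0, &z, &n);
+**
+** to fetch token 0 of phrase 0 of the current query, where pApi and
+** pFts are the arguments passed to the auxiliary function.)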
+*/ +static int fts5ApiQueryToken( + Fts5Context* pCtx, + int iPhrase, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + return sqlite3Fts5ExprQueryToken(pCsr->pExpr, iPhrase, iToken, ppOut, pnOut); +} + +/* +** xInstToken() API implemenetation. +*/ +static int fts5ApiInstToken( + Fts5Context *pCtx, + int iIdx, + int iToken, + const char **ppOut, int *pnOut +){ + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + int rc = SQLITE_OK; + if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0 + || SQLITE_OK==(rc = fts5CacheInstArray(pCsr)) + ){ + if( iIdx<0 || iIdx>=pCsr->nInstCount ){ + rc = SQLITE_RANGE; + }else{ + int iPhrase = pCsr->aInst[iIdx*3]; + int iCol = pCsr->aInst[iIdx*3 + 1]; + int iOff = pCsr->aInst[iIdx*3 + 2]; + i64 iRowid = fts5CursorRowid(pCsr); + rc = sqlite3Fts5ExprInstToken( + pCsr->pExpr, iRowid, iPhrase, iCol, iOff, iToken, ppOut, pnOut + ); + } + } + return rc; +} + static int fts5ApiQueryPhrase(Fts5Context*, int, void*, int(*)(const Fts5ExtensionApi*, Fts5Context*, void*) ); static const Fts5ExtensionApi sFts5Api = { - 2, /* iVersion */ + 3, /* iVersion */ fts5ApiUserData, fts5ApiColumnCount, fts5ApiRowCount, @@ -246974,6 +250020,8 @@ static const Fts5ExtensionApi sFts5Api = { fts5ApiPhraseNext, fts5ApiPhraseFirstColumn, fts5ApiPhraseNextColumn, + fts5ApiQueryToken, + fts5ApiInstToken }; /* @@ -247240,9 +250288,7 @@ static int fts5RenameMethod( ){ int rc; Fts5FullTable *pTab = (Fts5FullTable*)pVtab; - pTab->bInSavepoint = 1; rc = sqlite3Fts5StorageRename(pTab->pStorage, zName); - pTab->bInSavepoint = 0; return rc; } @@ -247259,26 +250305,12 @@ static int sqlite3Fts5FlushToDisk(Fts5Table *pTab){ static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; int rc = SQLITE_OK; - char *zSql = 0; - fts5CheckTransactionState(pTab, FTS5_SAVEPOINT, iSavepoint); - if( pTab->bInSavepoint==0 ){ - zSql = sqlite3_mprintf("INSERT INTO %Q.%Q(%Q) VALUES('flush')", - pTab->p.pConfig->zDb, pTab->p.pConfig->zName, pTab->p.pConfig->zName - ); - if( zSql ){ - pTab->bInSavepoint = 1; - rc = sqlite3_exec(pTab->p.pConfig->db, zSql, 0, 0, 0); - pTab->bInSavepoint = 0; - sqlite3_free(zSql); - }else{ - rc = SQLITE_NOMEM; - } - if( rc==SQLITE_OK ){ - pTab->iSavepoint = iSavepoint+1; - } + fts5CheckTransactionState(pTab, FTS5_SAVEPOINT, iSavepoint); + rc = sqlite3Fts5FlushToDisk((Fts5Table*)pVtab); + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint+1; } - return rc; } @@ -247310,8 +250342,8 @@ static int fts5RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ int rc = SQLITE_OK; fts5CheckTransactionState(pTab, FTS5_ROLLBACKTO, iSavepoint); fts5TripCursors(pTab); - pTab->p.pConfig->pgsz = 0; if( (iSavepoint+1)<=pTab->iSavepoint ){ + pTab->p.pConfig->pgsz = 0; rc = sqlite3Fts5StorageRollback(pTab->pStorage); } return rc; @@ -247516,7 +250548,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2023-11-01 11:23:50 17129ba1ff7f0daf37100ee82d507aef7827cf38de1866e2633096ae6ad81301", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a", -1, SQLITE_TRANSIENT); } /* @@ -247539,7 +250571,7 @@ static int fts5ShadowName(const char *zName){ ** if anything is found amiss. Return a NULL pointer if everything is ** OK. 
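**
** (Note, not part of the upstream patch: this xIntegrity method is what
** "PRAGMA integrity_check" invokes for each FTS5 table. The rewrite
** below calls sqlite3Fts5StorageIntegrity() directly rather than
** running an "INSERT INTO tbl(tbl) VALUES('integrity-check')"
** statement, so the check no longer re-enters the SQL layer from
** inside the pragma.)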
*/ -static int fts5Integrity( +static int fts5IntegrityMethod( sqlite3_vtab *pVtab, /* the FTS5 virtual table to check */ const char *zSchema, /* Name of schema in which this table lives */ const char *zTabname, /* Name of the table itself */ @@ -247547,27 +250579,21 @@ static int fts5Integrity( char **pzErr /* Write error message here */ ){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; - Fts5Config *pConfig = pTab->p.pConfig; - char *zSql; - char *zErr = 0; int rc; + assert( pzErr!=0 && *pzErr==0 ); UNUSED_PARAM(isQuick); - zSql = sqlite3_mprintf( - "INSERT INTO \"%w\".\"%w\"(\"%w\") VALUES('integrity-check');", - zSchema, zTabname, pConfig->zName); - if( zSql==0 ) return SQLITE_NOMEM; - rc = sqlite3_exec(pConfig->db, zSql, 0, 0, &zErr); - sqlite3_free(zSql); + rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, 0); if( (rc&0xff)==SQLITE_CORRUPT ){ *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", zSchema, zTabname); }else if( rc!=SQLITE_OK ){ *pzErr = sqlite3_mprintf("unable to validate the inverted index for" " FTS5 table %s.%s: %s", - zSchema, zTabname, zErr); + zSchema, zTabname, sqlite3_errstr(rc)); } - sqlite3_free(zErr); + sqlite3Fts5IndexCloseReader(pTab->p.pIndex); + return SQLITE_OK; } @@ -247597,7 +250623,7 @@ static int fts5Init(sqlite3 *db){ /* xRelease */ fts5ReleaseMethod, /* xRollbackTo */ fts5RollbackToMethod, /* xShadowName */ fts5ShadowName, - /* xIntegrity */ fts5Integrity + /* xIntegrity */ fts5IntegrityMethod }; int rc; @@ -248363,7 +251389,7 @@ static int sqlite3Fts5StorageRebuild(Fts5Storage *p){ } if( rc==SQLITE_OK ){ - rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0); + rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, pConfig->pzErrmsg); } while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pScan) ){ @@ -249150,6 +252176,12 @@ static const unsigned char sqlite3Utf8Trans1[] = { #endif /* ifndef SQLITE_AMALGAMATION */ +#define FTS5_SKIP_UTF8(zIn) { \ + if( ((unsigned char)(*(zIn++)))>=0xc0 ){ \ + while( (((unsigned char)*zIn) & 0xc0)==0x80 ){ zIn++; } \ + } \ +} + typedef struct Unicode61Tokenizer Unicode61Tokenizer; struct Unicode61Tokenizer { unsigned char aTokenChar[128]; /* ASCII range token characters */ @@ -250185,6 +253217,7 @@ static int fts5PorterTokenize( typedef struct TrigramTokenizer TrigramTokenizer; struct TrigramTokenizer { int bFold; /* True to fold to lower-case */ + int iFoldParam; /* Parameter to pass to Fts5UnicodeFold() */ }; /* @@ -250211,6 +253244,7 @@ static int fts5TriCreate( }else{ int i; pNew->bFold = 1; + pNew->iFoldParam = 0; for(i=0; rc==SQLITE_OK && ibFold = (zArg[0]=='0'); } + }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){ + if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){ + rc = SQLITE_ERROR; + }else{ + pNew->iFoldParam = (zArg[0]!='0') ? 
2 : 0; + } }else{ rc = SQLITE_ERROR; } } + + if( pNew->iFoldParam!=0 && pNew->bFold==0 ){ + rc = SQLITE_ERROR; + } + if( rc!=SQLITE_OK ){ fts5TriDelete((Fts5Tokenizer*)pNew); pNew = 0; @@ -250245,40 +253290,62 @@ static int fts5TriTokenize( TrigramTokenizer *p = (TrigramTokenizer*)pTok; int rc = SQLITE_OK; char aBuf[32]; + char *zOut = aBuf; + int ii; const unsigned char *zIn = (const unsigned char*)pText; const unsigned char *zEof = &zIn[nText]; u32 iCode; + int aStart[3]; /* Input offset of each character in aBuf[] */ UNUSED_PARAM(unusedFlags); - while( 1 ){ - char *zOut = aBuf; - int iStart = zIn - (const unsigned char*)pText; - const unsigned char *zNext; - - READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; - zNext = zIn; - if( zInbFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); + + /* Populate aBuf[] with the characters for the first trigram. */ + for(ii=0; ii<3; ii++){ + do { + aStart[ii] = zIn - (const unsigned char*)pText; READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; - }else{ - break; - } - if( zInbFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); + if( iCode==0 ) return SQLITE_OK; + if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); + }while( iCode==0 ); + WRITE_UTF8(zOut, iCode); + } + + /* At the start of each iteration of this loop: + ** + ** aBuf: Contains 3 characters. The 3 characters of the next trigram. + ** zOut: Points to the byte following the last character in aBuf. + ** aStart[3]: Contains the byte offset in the input text corresponding + ** to the start of each of the three characters in the buffer. + */ + assert( zIn<=zEof ); + while( 1 ){ + int iNext; /* Start of character following current tri */ + const char *z1; + + /* Read characters from the input up until the first non-diacritic */ + do { + iNext = zIn - (const unsigned char*)pText; READ_UTF8(zIn, zEof, iCode); if( iCode==0 ) break; - if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); - }else{ - break; - } - rc = xToken(pCtx, 0, aBuf, zOut-aBuf, iStart, iStart + zOut-aBuf); - if( rc!=SQLITE_OK ) break; - zIn = zNext; + if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); + }while( iCode==0 ); + + /* Pass the current trigram back to fts5 */ + rc = xToken(pCtx, 0, aBuf, zOut-aBuf, aStart[0], iNext); + if( iCode==0 || rc!=SQLITE_OK ) break; + + /* Remove the first character from buffer aBuf[]. Append the character + ** with codepoint iCode. */ + z1 = aBuf; + FTS5_SKIP_UTF8(z1); + memmove(aBuf, z1, zOut - z1); + zOut -= (z1 - aBuf); + WRITE_UTF8(zOut, iCode); + + /* Update the aStart[] array */ + aStart[0] = aStart[1]; + aStart[1] = aStart[2]; + aStart[2] = iNext; } return rc; @@ -250301,7 +253368,9 @@ static int sqlite3Fts5TokenizerPattern( ){ if( xCreate==fts5TriCreate ){ TrigramTokenizer *p = (TrigramTokenizer*)pTok; - return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB; + if( p->iFoldParam==0 ){ + return p->bFold ? 
FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB; + } } return FTS5_PATTERN_NONE; } @@ -252090,7 +255159,7 @@ static int fts5VocabFilterMethod( if( pEq ){ zTerm = (const char *)sqlite3_value_text(pEq); nTerm = sqlite3_value_bytes(pEq); - f = 0; + f = FTS5INDEX_QUERY_NOTOKENDATA; }else{ if( pGe ){ zTerm = (const char *)sqlite3_value_text(pGe); diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index 4eff82d9..a07a5195 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -147,9 +147,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.44.0" -#define SQLITE_VERSION_NUMBER 3044000 -#define SQLITE_SOURCE_ID "2023-11-01 11:23:50 17129ba1ff7f0daf37100ee82d507aef7827cf38de1866e2633096ae6ad81301" +#define SQLITE_VERSION "3.45.1" +#define SQLITE_VERSION_NUMBER 3045001 +#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -3955,15 +3955,17 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename); ** ** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language -** text that describes the error, as either UTF-8 or UTF-16 respectively. +** text that describes the error, as either UTF-8 or UTF-16 respectively, +** or NULL if no error message is available. ** (See how SQLite handles [invalid UTF] for exceptions to this rule.) ** ^(Memory to hold the error message string is managed internally. ** The application does not need to worry about freeing the result. ** However, the error string might be overwritten or deallocated by ** subsequent calls to other SQLite interface functions.)^ ** -** ^The sqlite3_errstr() interface returns the English-language text -** that describes the [result code], as UTF-8. +** ^The sqlite3_errstr(E) interface returns the English-language text +** that describes the [result code] E, as UTF-8, or NULL if E is not an +** result code for which a text error message is available. ** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. ** @@ -5574,13 +5576,27 @@ SQLITE_API int sqlite3_create_window_function( ** ** ** [[SQLITE_SUBTYPE]]
    SQLITE_SUBTYPE
    -** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call +** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call ** [sqlite3_value_subtype()] to inspect the sub-types of its arguments. -** Specifying this flag makes no difference for scalar or aggregate user -** functions. However, if it is not specified for a user-defined window -** function, then any sub-types belonging to arguments passed to the window -** function may be discarded before the window function is called (i.e. -** sqlite3_value_subtype() will always return 0). +** This flag instructs SQLite to omit some corner-case optimizations that +** might disrupt the operation of the [sqlite3_value_subtype()] function, +** causing it to return zero rather than the correct subtype(). +** SQL functions that invokes [sqlite3_value_subtype()] should have this +** property. If the SQLITE_SUBTYPE property is omitted, then the return +** value from [sqlite3_value_subtype()] might sometimes be zero even though +** a non-zero subtype was specified by the function argument expression. +** +** [[SQLITE_RESULT_SUBTYPE]]
    SQLITE_RESULT_SUBTYPE
    +** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call +** [sqlite3_result_subtype()] to cause a sub-type to be associated with its +** result. +** Every function that invokes [sqlite3_result_subtype()] should have this +** property. If it does not, then the call to [sqlite3_result_subtype()] +** might become a no-op if the function is used as term in an +** [expression index]. On the other hand, SQL functions that never invoke +** [sqlite3_result_subtype()] should avoid setting this property, as the +** purpose of this property is to disable certain optimizations that are +** incompatible with subtypes. **
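+**
+** (Registration sketch, not part of the upstream patch; "myfunc" and
+** myFuncImpl are hypothetical names: a function that calls
+** sqlite3_result_subtype() would now be registered as
+**
+**     sqlite3_create_function(db, "myfunc", 1,
+**         SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_RESULT_SUBTYPE,
+**         0, myFuncImpl, 0, 0);
+**
+** so that its subtype survives use inside an expression index.)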
    ** */ @@ -5588,6 +5604,7 @@ SQLITE_API int sqlite3_create_window_function( #define SQLITE_DIRECTONLY 0x000080000 #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 +#define SQLITE_RESULT_SUBTYPE 0x001000000 /* ** CAPI3REF: Deprecated Functions @@ -5784,6 +5801,12 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); ** information can be used to pass a limited amount of context from ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. +** +** Every [application-defined SQL function] that invoke this interface +** should include the [SQLITE_SUBTYPE] property in the text +** encoding argument when the function is [sqlite3_create_function|registered]. +** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() +** might return zero instead of the upstream subtype in some corner cases. */ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); @@ -5914,14 +5937,22 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); **
• ^(when sqlite3_set_auxdata() is invoked again on the same
** parameter)^, or
**
• ^(during the original sqlite3_set_auxdata() call when a memory
-** allocation error occurs.)^
+** allocation error occurs.)^
+**
  • ^(during the original sqlite3_set_auxdata() call if the function +** is evaluated during query planning instead of during query execution, +** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ ** -** Note the last bullet in particular. The destructor X in +** Note the last two bullets in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the ** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() ** should be called near the end of the function implementation and the ** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. +** sqlite3_set_auxdata() has been called. Furthermore, a call to +** sqlite3_get_auxdata() that occurs immediately after a corresponding call +** to sqlite3_set_auxdata() might still return NULL if an out-of-memory +** condition occurred during the sqlite3_set_auxdata() call or if the +** function is being evaluated during query planning rather than during +** query execution. ** ** ^(In practice, auxiliary data is preserved between function calls for ** function parameters that are compile-time constants, including literal @@ -6195,6 +6226,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n); ** higher order bits are discarded. ** The number of subtype bytes preserved by SQLite might increase ** in future releases of SQLite. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_RESULT_SUBTYPE] property in its +** text encoding argument when the SQL function is +** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE] +** property is omitted from the function that invokes sqlite3_result_subtype(), +** then in some cases the sqlite3_result_subtype() might fail to set +** the result subtype. +** +** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any +** SQL function that invokes the sqlite3_result_subtype() interface +** and that does not have the SQLITE_RESULT_SUBTYPE property will raise +** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1 +** by default. */ SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int); @@ -7995,9 +8040,11 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** ** ^(Some systems (for example, Windows 95) do not support the operation ** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() -** will always return SQLITE_BUSY. The SQLite core only ever uses -** sqlite3_mutex_try() as an optimization so this is acceptable -** behavior.)^ +** will always return SQLITE_BUSY. In most cases the SQLite core only uses +** sqlite3_mutex_try() as an optimization, so this is acceptable +** behavior. The exceptions are unix builds that set the +** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working +** sqlite3_mutex_try() is required.)^ ** ** ^The sqlite3_mutex_leave() routine exits a mutex that was ** previously entered by the same thread. The behavior @@ -8256,6 +8303,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_ASSERT 12 #define SQLITE_TESTCTRL_ALWAYS 13 #define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */ +#define SQLITE_TESTCTRL_JSON_SELFCHECK 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ @@ -12769,8 +12817,11 @@ struct Fts5PhraseIter { ** created with the "columnsize=0" option. 
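**
** (Sketch, not part of the upstream patch, of the stricter argument
** checking documented below:
**
**     const char *z = 0; int n = 0;
**     int rc = pApi->xColumnText(pFts, -1, &z, &n);
**     assert( rc==SQLITE_RANGE );
**
** xInst() and xInstToken() apply the same convention to out-of-range
** instance indexes.)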
** ** xColumnText: -** This function attempts to retrieve the text of column iCol of the -** current document. If successful, (*pz) is set to point to a buffer +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer ** containing the text in utf-8 encoding, (*pn) is set to the size in bytes ** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, ** if an error occurs, an SQLite error code is returned and the final values @@ -12780,8 +12831,10 @@ struct Fts5PhraseIter { ** Returns the number of phrases in the current query expression. ** ** xPhraseSize: -** Returns the number of tokens in phrase iPhrase of the query. Phrases -** are numbered starting from zero. +** If parameter iCol is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. ** ** xInstCount: ** Set *pnInst to the total number of occurrences of all phrases within @@ -12797,12 +12850,13 @@ struct Fts5PhraseIter { ** Query for the details of phrase match iIdx within the current row. ** Phrase matches are numbered starting from zero, so the iIdx argument ** should be greater than or equal to zero and smaller than the value -** output by xInstCount(). +** output by xInstCount(). If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. ** -** Usually, output parameter *piPhrase is set to the phrase number, *piCol +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol ** to the column in which it occurs and *piOff the token offset of the -** first token of the phrase. Returns SQLITE_OK if successful, or an error -** code (i.e. SQLITE_NOMEM) if an error occurs. +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. ** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. @@ -12828,6 +12882,10 @@ struct Fts5PhraseIter { ** Invoking Api.xUserData() returns a copy of the pointer passed as ** the third argument to pUserData. ** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. ** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK. @@ -12942,9 +13000,42 @@ struct Fts5PhraseIter { ** ** xPhraseNextColumn() ** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. 
+** +** If iPhrase or iToken are less than zero, or if iPhrase is greater than +** or equal to the number of phrases in the query as reported by +** xPhraseCount(), or if iToken is equal to or greater than the number of +** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken +** are both zeroed. +** +** The output text is not a copy of the query text that specified the +** token. It is the output of the tokenizer module. For tokendata=1 +** tables, this includes any embedded 0x00 and trailing data. +** +** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase hit iIdx within the +** current row. If iIdx is less than zero or greater than or equal to the +** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, +** output variable (*ppToken) is set to point to a buffer containing the +** matching document token, and (*pnToken) to the size of that buffer in +** bytes. This API is not available if the specified token matches a +** prefix query term. In that case both output variables are always set +** to 0. +** +** The output text is not a copy of the document text that was tokenized. +** It is the output of the tokenizer module. For tokendata=1 tables, this +** includes any embedded 0x00 and trailing data. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ void *(*xUserData)(Fts5Context*); @@ -12979,6 +13070,13 @@ struct Fts5ExtensionApi { int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); + + /* Below this point are iVersion>=3 only */ + int (*xQueryToken)(Fts5Context*, + int iPhrase, int iToken, + const char **ppToken, int *pnToken + ); + int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); }; /* diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index 5e4e2ff5..4b3b6cab 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -21,7 +21,6 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_DEFAULT_WAL_SYNCHRONOUS=1 #cgo CFLAGS: -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT #cgo CFLAGS: -Wno-deprecated-declarations -#cgo linux,!android CFLAGS: -DHAVE_PREAD64=1 -DHAVE_PWRITE64=1 #cgo openbsd CFLAGS: -I/usr/local/include #cgo openbsd LDFLAGS: -L/usr/local/lib #ifndef USE_LIBSQLITE3 @@ -48,6 +47,18 @@ package sqlite3 # define SQLITE_DETERMINISTIC 0 #endif +#if defined(HAVE_PREAD64) && defined(HAVE_PWRITE64) +# undef USE_PREAD +# undef USE_PWRITE +# define USE_PREAD64 1 +# define USE_PWRITE64 1 +#elif defined(HAVE_PREAD) && defined(HAVE_PWRITE) +# undef USE_PREAD +# undef USE_PWRITE +# define USE_PREAD64 1 +# define USE_PWRITE64 1 +#endif + static int _sqlite3_open_v2(const char *filename, sqlite3 **ppDb, int flags, const char *zVfs) { #ifdef SQLITE_OPEN_URI @@ -596,10 +607,9 @@ func (c *SQLiteConn) RegisterAuthorizer(callback func(int, string, string, strin // RegisterFunc makes a Go function available as a SQLite function. // // The Go function can have arguments of the following types: any -// numeric type except complex, bool, []byte, string and -// interface{}.
interface{} arguments are given the direct translation -// of the SQLite data type: int64 for INTEGER, float64 for FLOAT, -// []byte for BLOB, string for TEXT. +// numeric type except complex, bool, []byte, string and any. +// any arguments are given the direct translation of the SQLite data type: +// int64 for INTEGER, float64 for FLOAT, []byte for BLOB, string for TEXT. // // The function can additionally be variadic, as long as the type of // the variadic argument is one of the above. @@ -609,7 +619,7 @@ func (c *SQLiteConn) RegisterAuthorizer(callback func(int, string, string, strin // optimizations in its queries. // // See _example/go_custom_funcs for a detailed example. -func (c *SQLiteConn) RegisterFunc(name string, impl interface{}, pure bool) error { +func (c *SQLiteConn) RegisterFunc(name string, impl any, pure bool) error { var fi functionInfo fi.f = reflect.ValueOf(impl) t := fi.f.Type() @@ -691,7 +701,7 @@ func sqlite3CreateFunction(db *C.sqlite3, zFunctionName *C.char, nArg C.int, eTe // return an error in addition to their other return values. // // See _example/go_custom_funcs for a detailed example. -func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool) error { +func (c *SQLiteConn) RegisterAggregator(name string, impl any, pure bool) error { var ai aggInfo ai.constructor = reflect.ValueOf(impl) t := ai.constructor.Type() @@ -865,14 +875,16 @@ func (c *SQLiteConn) exec(ctx context.Context, query string, args []driver.Named // consume the number of arguments used in the current // statement and append all named arguments not // contained therein - stmtArgs = append(stmtArgs, args[start:start+na]...) - for i := range args { - if (i < start || i >= na) && args[i].Name != "" { - stmtArgs = append(stmtArgs, args[i]) + if len(args[start:start+na]) > 0 { + stmtArgs = append(stmtArgs, args[start:start+na]...) + for i := range args { + if (i < start || i >= na) && args[i].Name != "" { + stmtArgs = append(stmtArgs, args[i]) + } + } + for i := range stmtArgs { + stmtArgs[i].Ordinal = i + 1 } - } - for i := range stmtArgs { - stmtArgs[i].Ordinal = i + 1 } res, err = s.(*SQLiteStmt).exec(ctx, stmtArgs) if err != nil && err != driver.ErrSkip { @@ -965,103 +977,104 @@ func (c *SQLiteConn) begin(ctx context.Context) (driver.Tx, error) { // The argument may be either in parentheses or it may be separated from // the pragma name by an equal sign. The two syntaxes yield identical results. // In many pragmas, the argument is a boolean. The boolean can be one of: -// 1 yes true on -// 0 no false off +// +// 1 yes true on +// 0 no false off // // You can specify a DSN string using a URI as the filename. -// test.db -// file:test.db?cache=shared&mode=memory -// :memory: -// file::memory: // -// mode -// Access mode of the database. -// https://www.sqlite.org/c3ref/open.html -// Values: -// - ro -// - rw -// - rwc -// - memory +// test.db +// file:test.db?cache=shared&mode=memory +// :memory: +// file::memory: // -// cache -// SQLite Shared-Cache Mode -// https://www.sqlite.org/sharedcache.html -// Values: -// - shared -// - private +// mode +// Access mode of the database. +// https://www.sqlite.org/c3ref/open.html +// Values: +// - ro +// - rw +// - rwc +// - memory // -// immutable=Boolean -// The immutable parameter is a boolean query parameter that indicates -// that the database file is stored on read-only media.
When immutable is set, -// SQLite assumes that the database file cannot be changed, -// even by a process with higher privilege, -// and so the database is opened read-only and all locking and change detection is disabled. -// Caution: Setting the immutable property on a database file that -// does in fact change can result in incorrect query results and/or SQLITE_CORRUPT errors. +// cache +// SQLite Shared-Cache Mode +// https://www.sqlite.org/sharedcache.html +// Values: +// - shared +// - private // -// go-sqlite3 adds the following query parameters to those used by SQLite: -// _loc=XXX -// Specify location of time format. It's possible to specify "auto". +// immutable=Boolean +// The immutable parameter is a boolean query parameter that indicates +// that the database file is stored on read-only media. When immutable is set, +// SQLite assumes that the database file cannot be changed, +// even by a process with higher privilege, +// and so the database is opened read-only and all locking and change detection is disabled. +// Caution: Setting the immutable property on a database file that +// does in fact change can result in incorrect query results and/or SQLITE_CORRUPT errors. // -// _mutex=XXX -// Specify mutex mode. XXX can be "no", "full". +// go-sqlite3 adds the following query parameters to those used by SQLite: // -// _txlock=XXX -// Specify locking behavior for transactions. XXX can be "immediate", -// "deferred", "exclusive". +// _loc=XXX +// Specify location of time format. It's possible to specify "auto". // -// _auto_vacuum=X | _vacuum=X -// 0 | none - Auto Vacuum disabled -// 1 | full - Auto Vacuum FULL -// 2 | incremental - Auto Vacuum Incremental +// _mutex=XXX +// Specify mutex mode. XXX can be "no", "full". // -// _busy_timeout=XXX"| _timeout=XXX -// Specify value for sqlite3_busy_timeout. +// _txlock=XXX +// Specify locking behavior for transactions. XXX can be "immediate", +// "deferred", "exclusive". // -// _case_sensitive_like=Boolean | _cslike=Boolean -// https://www.sqlite.org/pragma.html#pragma_case_sensitive_like -// Default or disabled the LIKE operation is case-insensitive. -// When enabling this options behaviour of LIKE will become case-sensitive. +// _auto_vacuum=X | _vacuum=X +// 0 | none - Auto Vacuum disabled +// 1 | full - Auto Vacuum FULL +// 2 | incremental - Auto Vacuum Incremental // -// _defer_foreign_keys=Boolean | _defer_fk=Boolean -// Defer Foreign Keys until outermost transaction is committed. +// _busy_timeout=XXX | _timeout=XXX +// Specify value for sqlite3_busy_timeout. // -// _foreign_keys=Boolean | _fk=Boolean -// Enable or disable enforcement of foreign keys. +// _case_sensitive_like=Boolean | _cslike=Boolean +// https://www.sqlite.org/pragma.html#pragma_case_sensitive_like +// By default, or when disabled, the LIKE operation is case-insensitive. +// When this option is enabled, the behaviour of LIKE becomes case-sensitive.
// -// _locking_mode=X | _locking=X -// Sets the database connection locking-mode. -// The locking-mode is either NORMAL or EXCLUSIVE. -// https://www.sqlite.org/pragma.html#pragma_locking_mode +// _ignore_check_constraints=Boolean +// This pragma enables or disables the enforcement of CHECK constraints. +// The default setting is off, meaning that CHECK constraints are enforced by default. // -// _query_only=Boolean -// The query_only pragma prevents all changes to database files when enabled. +// _journal_mode=MODE | _journal=MODE +// Set journal mode for the databases associated with the current connection. +// https://www.sqlite.org/pragma.html#pragma_journal_mode // -// _recursive_triggers=Boolean | _rt=Boolean -// Enable or disable recursive triggers. +// _locking_mode=X | _locking=X +// Sets the database connection locking-mode. +// The locking-mode is either NORMAL or EXCLUSIVE. +// https://www.sqlite.org/pragma.html#pragma_locking_mode // -// _secure_delete=Boolean|FAST -// When secure_delete is on, SQLite overwrites deleted content with zeros. -// https://www.sqlite.org/pragma.html#pragma_secure_delete +// _query_only=Boolean +// The query_only pragma prevents all changes to database files when enabled. // -// _synchronous=X | _sync=X -// Change the setting of the "synchronous" flag. -// https://www.sqlite.org/pragma.html#pragma_synchronous +// _recursive_triggers=Boolean | _rt=Boolean +// Enable or disable recursive triggers. // -// _writable_schema=Boolean -// When this pragma is on, the SQLITE_MASTER tables in which database -// can be changed using ordinary UPDATE, INSERT, and DELETE statements. -// Warning: misuse of this pragma can easily result in a corrupt database file. +// _secure_delete=Boolean|FAST +// When secure_delete is on, SQLite overwrites deleted content with zeros. +// https://www.sqlite.org/pragma.html#pragma_secure_delete // +// _synchronous=X | _sync=X +// Change the setting of the "synchronous" flag. +// https://www.sqlite.org/pragma.html#pragma_synchronous // +// _writable_schema=Boolean +// When this pragma is on, the SQLITE_MASTER tables in which database +// can be changed using ordinary UPDATE, INSERT, and DELETE statements. +// Warning: misuse of this pragma can easily result in a corrupt database file. 
func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) { if C.sqlite3_threadsafe() == 0 { return nil, errors.New("sqlite library was not compiled for thread-safe operation") @@ -1895,6 +1908,7 @@ func (s *SQLiteStmt) Close() error { if rv != C.SQLITE_OK { return s.c.lastError() } + s.c = nil runtime.SetFinalizer(s, nil) return nil } @@ -2000,6 +2014,7 @@ func (s *SQLiteStmt) query(ctx context.Context, args []driver.NamedValue) (drive closed: false, ctx: ctx, } + runtime.SetFinalizer(rows, (*SQLiteRows).Close) return rows, nil } @@ -2045,6 +2060,7 @@ func (s *SQLiteStmt) exec(ctx context.Context, args []driver.NamedValue) (driver err error } resultCh := make(chan result) + defer close(resultCh) go func() { r, err := s.execSync(args) resultCh <- result{r, err} @@ -2111,6 +2127,8 @@ func (rc *SQLiteRows) Close() error { return rc.s.c.lastError() } rc.s.mu.Unlock() + rc.s = nil + runtime.SetFinalizer(rc, nil) return nil } @@ -2157,6 +2175,7 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error { return rc.nextSyncLocked(dest) } resultCh := make(chan error) + defer close(resultCh) go func() { resultCh <- rc.nextSyncLocked(dest) }() diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go index afd93333..bd9a3bc0 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go @@ -50,15 +50,15 @@ import ( // perhaps using a cryptographic hash function like SHA1. // CryptEncoderSHA1 encodes a password with SHA1 -func CryptEncoderSHA1(pass []byte, hash interface{}) []byte { +func CryptEncoderSHA1(pass []byte, hash any) []byte { h := sha1.Sum(pass) return h[:] } // CryptEncoderSSHA1 encodes a password with SHA1 with the // configured salt. -func CryptEncoderSSHA1(salt string) func(pass []byte, hash interface{}) []byte { - return func(pass []byte, hash interface{}) []byte { +func CryptEncoderSSHA1(salt string) func(pass []byte, hash any) []byte { + return func(pass []byte, hash any) []byte { s := []byte(salt) p := append(pass, s...) h := sha1.Sum(p) @@ -67,15 +67,15 @@ func CryptEncoderSSHA1(salt string) func(pass []byte, hash interface{}) []byte { } // CryptEncoderSHA256 encodes a password with SHA256 -func CryptEncoderSHA256(pass []byte, hash interface{}) []byte { +func CryptEncoderSHA256(pass []byte, hash any) []byte { h := sha256.Sum256(pass) return h[:] } // CryptEncoderSSHA256 encodes a password with SHA256 // with the configured salt -func CryptEncoderSSHA256(salt string) func(pass []byte, hash interface{}) []byte { - return func(pass []byte, hash interface{}) []byte { +func CryptEncoderSSHA256(salt string) func(pass []byte, hash any) []byte { + return func(pass []byte, hash any) []byte { s := []byte(salt) p := append(pass, s...) h := sha256.Sum256(p) @@ -84,15 +84,15 @@ func CryptEncoderSSHA256(salt string) func(pass []byte, hash interface{}) []byte } // CryptEncoderSHA384 encodes a password with SHA384 -func CryptEncoderSHA384(pass []byte, hash interface{}) []byte { +func CryptEncoderSHA384(pass []byte, hash any) []byte { h := sha512.Sum384(pass) return h[:] } // CryptEncoderSSHA384 encodes a password with SHA384 // with the configured salt -func CryptEncoderSSHA384(salt string) func(pass []byte, hash interface{}) []byte { - return func(pass []byte, hash interface{}) []byte { +func CryptEncoderSSHA384(salt string) func(pass []byte, hash any) []byte { + return func(pass []byte, hash any) []byte { s := []byte(salt) p := append(pass, s...) 
h := sha512.Sum384(p) @@ -101,15 +101,15 @@ func CryptEncoderSSHA384(salt string) func(pass []byte, hash interface{}) []byte } // CryptEncoderSHA512 encodes a password with SHA512 -func CryptEncoderSHA512(pass []byte, hash interface{}) []byte { +func CryptEncoderSHA512(pass []byte, hash any) []byte { h := sha512.Sum512(pass) return h[:] } // CryptEncoderSSHA512 encodes a password with SHA512 // with the configured salt -func CryptEncoderSSHA512(salt string) func(pass []byte, hash interface{}) []byte { - return func(pass []byte, hash interface{}) []byte { +func CryptEncoderSSHA512(salt string) func(pass []byte, hash any) []byte { + return func(pass []byte, hash any) []byte { s := []byte(salt) p := append(pass, s...) h := sha512.Sum512(p) diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go index 514fd7ec..34cad08e 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go @@ -3,8 +3,8 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. -// +build cgo -// +build go1.8 +//go:build cgo && go1.8 +// +build cgo,go1.8 package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go index ac609c95..95cc7c0b 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build libsqlite3 // +build libsqlite3 package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go index 9433fea8..03cbc8b6 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build !sqlite_omit_load_extension // +build !sqlite_omit_load_extension package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go index 8c75f9bd..d4f8ce65 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_omit_load_extension // +build sqlite_omit_load_extension package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go index 8c4d4d20..51240cbf 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. 
+//go:build sqlite_allow_uri_authority // +build sqlite_allow_uri_authority package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go index 63c80cfe..565dbc29 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go @@ -4,8 +4,8 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. -// +build !windows -// +build sqlite_app_armor +//go:build !windows && sqlite_app_armor +// +build !windows,sqlite_app_armor package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go index c67fa82b..63659b46 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go @@ -1,3 +1,4 @@ +//go:build sqlite_column_metadata // +build sqlite_column_metadata package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go index a676e097..82c944e1 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_foreign_keys // +build sqlite_foreign_keys package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go index 0f38df75..2645f284 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_fts5 || fts5 // +build sqlite_fts5 fts5 package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go index f82bdd0d..2d47827b 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_icu || icu // +build sqlite_icu icu package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go index 6512b2b3..cd2e5401 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_introspect // +build sqlite_introspect package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_math_functions.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_math_functions.go index 7cd68d3f..bd62d9a2 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_math_functions.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_math_functions.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. 
+//go:build sqlite_math_functions // +build sqlite_math_functions package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go index cea032e3..ed725eeb 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build cgo // +build cgo package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go index b43e4821..8cce278f 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_preupdate_hook // +build sqlite_preupdate_hook package sqlite3 @@ -54,10 +55,10 @@ func (d *SQLitePreUpdateData) Count() int { return int(C.sqlite3_preupdate_count(d.Conn.db)) } -func (d *SQLitePreUpdateData) row(dest []interface{}, new bool) error { +func (d *SQLitePreUpdateData) row(dest []any, new bool) error { for i := 0; i < d.Count() && i < len(dest); i++ { var val *C.sqlite3_value - var src interface{} + var src any // Initially I tried making this just a function pointer argument, but // it's absurdly complicated to pass C function pointers. @@ -95,7 +96,7 @@ func (d *SQLitePreUpdateData) row(dest []interface{}, new bool) error { // Old populates dest with the row data to be replaced. This works similar to // database/sql's Rows.Scan() -func (d *SQLitePreUpdateData) Old(dest ...interface{}) error { +func (d *SQLitePreUpdateData) Old(dest ...any) error { if d.Op == SQLITE_INSERT { return errors.New("There is no old row for INSERT operations") } @@ -104,7 +105,7 @@ func (d *SQLitePreUpdateData) Old(dest ...interface{}) error { // New populates dest with the replacement row data. This works similar to // database/sql's Rows.Scan() -func (d *SQLitePreUpdateData) New(dest ...interface{}) error { +func (d *SQLitePreUpdateData) New(dest ...any) error { if d.Op == SQLITE_DELETE { return errors.New("There is no new row for DELETE operations") } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go index c510a15b..f60da6c1 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build !sqlite_preupdate_hook && cgo // +build !sqlite_preupdate_hook,cgo package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go index 934fa6b8..6bb05b84 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. 
+//go:build sqlite_secure_delete // +build sqlite_secure_delete package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go index b0de130f..982020ae 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_secure_delete_fast // +build sqlite_secure_delete_fast package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go index 2560c43a..f1710c1c 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go @@ -1,3 +1,4 @@ +//go:build !libsqlite3 || sqlite_serialize // +build !libsqlite3 sqlite_serialize package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize_omit.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize_omit.go index b154dd34..d00ead0b 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize_omit.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize_omit.go @@ -1,3 +1,4 @@ +//go:build libsqlite3 && !sqlite_serialize // +build libsqlite3,!sqlite_serialize package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go index d4d30f0d..799fbb0f 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_stat4 // +build sqlite_stat4 package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go index adfa26c5..76f7bbfb 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go @@ -3,8 +3,8 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. -// +build cgo -// +build sqlite_unlock_notify +//go:build cgo && sqlite_unlock_notify +// +build cgo,sqlite_unlock_notify package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go index b62b6084..de9630c2 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_userauth // +build sqlite_userauth package sqlite3 @@ -79,7 +80,7 @@ var ( // If a database contains the SQLITE_USER table, then the // call to Authenticate must be invoked with an // appropriate username and password prior to enable read and write -//access to the database. +// access to the database. // // Return SQLITE_OK on success or SQLITE_ERROR if the username/password // combination is incorrect or unknown. @@ -103,9 +104,10 @@ func (c *SQLiteConn) Authenticate(username, password string) error { // It is however exported for usage within SQL by the user. 
// // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authenticate(username, password string) int { // Allocate C Variables cuser := C.CString(username) @@ -155,9 +157,10 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error { // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserAdd(username, password string, admin int) int { // Allocate C Variables cuser := C.CString(username) @@ -207,9 +210,10 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserChange(username, password string, admin int) int { // Allocate C Variables cuser := C.CString(username) @@ -249,9 +253,10 @@ func (c *SQLiteConn) AuthUserDelete(username string) error { // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserDelete(username string) int { // Allocate C Variables cuser := C.CString(username) @@ -280,8 +285,9 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) { // It is however exported for usage within SQL by the user. // // Returns: -// 0 - Disabled -// 1 - Enabled +// +// 0 - Disabled +// 1 - Enabled func (c *SQLiteConn) authEnabled() int { return int(C._sqlite3_auth_enabled(c.db)) } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go index 302cd57a..15370df7 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build !sqlite_userauth // +build !sqlite_userauth package sqlite3 @@ -17,7 +18,7 @@ import ( // If a database contains the SQLITE_USER table, then the // call to Authenticate must be invoked with an // appropriate username and password prior to enable read and write -//access to the database. +// access to the database. // // Return SQLITE_OK on success or SQLITE_ERROR if the username/password // combination is incorrect or unknown. @@ -34,9 +35,10 @@ func (c *SQLiteConn) Authenticate(username, password string) error { // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authenticate(username, password string) int { // NOOP return 0 @@ -65,9 +67,10 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error { // It is however exported for usage within SQL by the user. 
// // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserAdd(username, password string, admin int) int { // NOOP return 0 @@ -96,9 +99,10 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserChange(username, password string, admin int) int { // NOOP return 0 @@ -122,9 +126,10 @@ func (c *SQLiteConn) AuthUserDelete(username string) error { // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserDelete(username string) int { // NOOP return 0 @@ -142,8 +147,9 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) { // It is however exported for usage within SQL by the user. // // Returns: -// 0 - Disabled -// 1 - Enabled +// +// 0 - Disabled +// 1 - Enabled func (c *SQLiteConn) authEnabled() int { // NOOP return 0 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go index 5185a96d..df13c9d2 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_vacuum_full // +build sqlite_vacuum_full package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go index a9d8a185..a2e48814 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_vacuum_incr // +build sqlite_vacuum_incr package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go index 4a93c465..9b164b3e 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. 
+//go:build sqlite_vtable || vtable // +build sqlite_vtable vtable package sqlite3 @@ -516,7 +517,7 @@ func goMDestroy(pClientData unsafe.Pointer) { func goVFilter(pCursor unsafe.Pointer, idxNum C.int, idxName *C.char, argc C.int, argv **C.sqlite3_value) *C.char { vtc := lookupHandle(pCursor).(*sqliteVTabCursor) args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc] - vals := make([]interface{}, 0, argc) + vals := make([]any, 0, argc) for _, v := range args { conv, err := callbackArgGeneric(v) if err != nil { @@ -588,7 +589,7 @@ func goVUpdate(pVTab unsafe.Pointer, argc C.int, argv **C.sqlite3_value, pRowid if v, ok := vt.vTab.(VTabUpdater); ok { // convert argv args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc] - vals := make([]interface{}, 0, argc) + vals := make([]any, 0, argc) for _, v := range args { conv, err := callbackArgGeneric(v) if err != nil { @@ -662,9 +663,9 @@ type VTab interface { // deleted. // See: https://sqlite.org/vtab.html#xupdate type VTabUpdater interface { - Delete(interface{}) error - Insert(interface{}, []interface{}) (int64, error) - Update(interface{}, []interface{}) error + Delete(any) error + Insert(any, []any) (int64, error) + Update(any, []any) error } // VTabCursor describes cursors that point into the virtual table and are used @@ -673,7 +674,7 @@ type VTabCursor interface { // http://sqlite.org/vtab.html#xclose Close() error // http://sqlite.org/vtab.html#xfilter - Filter(idxNum int, idxStr string, vals []interface{}) error + Filter(idxNum int, idxStr string, vals []any) error // http://sqlite.org/vtab.html#xnext Next() error // http://sqlite.org/vtab.html#xeof diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go index 077d3c64..1f9a7550 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build !windows // +build !windows package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go index 102f90c9..fb4d3251 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build solaris // +build solaris package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go index 56bb9149..6c47cce1 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. 
+//go:build sqlite_trace || trace // +build sqlite_trace trace package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go index 0fd8210b..20537a09 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go @@ -74,7 +74,7 @@ func scanType(cdt string) reflect.Type { case SQLITE_TIME: return reflect.TypeOf(sql.NullTime{}) } - return reflect.TypeOf(new(interface{})) + return reflect.TypeOf(new(any)) } func databaseTypeConvSqlite(t string) int { diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go index b6739bf6..6527f6fd 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build cgo // +build cgo package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go index 81aa2abd..f863bcd3 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build windows // +build windows package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/static_mock.go b/vendor/github.com/mattn/go-sqlite3/static_mock.go index f19e842f..d2c5a276 100644 --- a/vendor/github.com/mattn/go-sqlite3/static_mock.go +++ b/vendor/github.com/mattn/go-sqlite3/static_mock.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build !cgo // +build !cgo package sqlite3 @@ -28,10 +29,10 @@ type ( ) func (SQLiteDriver) Open(s string) (driver.Conn, error) { return nil, errorMsg } -func (c *SQLiteConn) RegisterAggregator(string, interface{}, bool) error { return errorMsg } +func (c *SQLiteConn) RegisterAggregator(string, any, bool) error { return errorMsg } func (c *SQLiteConn) RegisterAuthorizer(func(int, string, string, string) int) {} func (c *SQLiteConn) RegisterCollation(string, func(string, string) int) error { return errorMsg } func (c *SQLiteConn) RegisterCommitHook(func() int) {} -func (c *SQLiteConn) RegisterFunc(string, interface{}, bool) error { return errorMsg } +func (c *SQLiteConn) RegisterFunc(string, any, bool) error { return errorMsg } func (c *SQLiteConn) RegisterRollbackHook(func()) {} func (c *SQLiteConn) RegisterUpdateHook(func(int, string, string, int64)) {} diff --git a/vendor/github.com/moby/sys/user/LICENSE b/vendor/github.com/moby/sys/user/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/moby/sys/user/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
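The build-constraint churn in the go-sqlite3 files above follows one pattern throughout: every gated file gains a modern //go:build line (Go 1.17+) while keeping the legacy // +build comment for older toolchains. A hypothetical file gated the same way, shown only to make the convention explicit (the tag names are borrowed from the fts5 file above):

    //go:build sqlite_fts5 || fts5
    // +build sqlite_fts5 fts5

    // This file compiles only when built with a matching tag, e.g.
    // "go build -tags sqlite_fts5"; gofmt keeps the two constraint
    // lines in sync.
    package sqlite3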
diff --git a/vendor/github.com/moby/sys/user/lookup_unix.go b/vendor/github.com/moby/sys/user/lookup_unix.go new file mode 100644 index 00000000..f95c1409 --- /dev/null +++ b/vendor/github.com/moby/sys/user/lookup_unix.go @@ -0,0 +1,157 @@ +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package user + +import ( + "io" + "os" + "strconv" + + "golang.org/x/sys/unix" +) + +// Unix-specific path to the passwd and group formatted files. +const ( + unixPasswdPath = "/etc/passwd" + unixGroupPath = "/etc/group" +) + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (User, error) { + return lookupUserFunc(func(u User) bool { + return u.Name == username + }) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupUid +// returns an error. +func LookupUid(uid int) (User, error) { + return lookupUserFunc(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupUserFunc(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, ErrNoPasswdEntries + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. +func LookupGroup(groupname string) (Group, error) { + return lookupGroupFunc(func(g Group) bool { + return g.Name == groupname + }) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. +func LookupGid(gid int) (Group, error) { + return lookupGroupFunc(func(g Group) bool { + return g.Gid == gid + }) +} + +func lookupGroupFunc(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the groups. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No group entries found. + if len(groups) == 0 { + return Group{}, ErrNoGroupEntries + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +func GetPasswdPath() (string, error) { + return unixPasswdPath, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdPath) +} + +func GetGroupPath() (string, error) { + return unixGroupPath, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupPath) +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error.
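To illustrate the lookup helpers this new vendored file provides, a short sketch (illustrative only, not vendored code; it assumes a readable /etc/passwd with a uid-0 entry):

    package main

    import (
        "fmt"
        "log"

        "github.com/moby/sys/user"
    )

    func main() {
        // LookupUid scans /etc/passwd through GetPasswd/ParsePasswdFilter as
        // defined above and returns the first matching entry.
        u, err := user.LookupUid(0)
        if err != nil {
            log.Fatal(err) // e.g. ErrNoPasswdEntries
        }
        fmt.Printf("name=%s home=%s shell=%s\n", u.Name, u.Home, u.Shell)
    }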
+func CurrentUser() (User, error) { + return LookupUid(unix.Getuid()) +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/group. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. +func CurrentGroup() (Group, error) { + return LookupGid(unix.Getgid()) +} + +func currentUserSubIDs(fileName string) ([]SubID, error) { + u, err := CurrentUser() + if err != nil { + return nil, err + } + filter := func(entry SubID) bool { + return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid) + } + return ParseSubIDFileFilter(fileName, filter) +} + +func CurrentUserSubUIDs() ([]SubID, error) { + return currentUserSubIDs("/etc/subuid") +} + +func CurrentUserSubGIDs() ([]SubID, error) { + return currentUserSubIDs("/etc/subgid") +} + +func CurrentProcessUIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/uid_map") +} + +func CurrentProcessGIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/gid_map") +} diff --git a/vendor/github.com/moby/sys/user/user.go b/vendor/github.com/moby/sys/user/user.go new file mode 100644 index 00000000..984466d1 --- /dev/null +++ b/vendor/github.com/moby/sys/user/user.go @@ -0,0 +1,605 @@ +package user + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + minID = 0 + maxID = 1<<31 - 1 // for 32-bit systems compatibility +) + +var ( + // ErrNoPasswdEntries is returned if no matching entries were found in /etc/passwd. + ErrNoPasswdEntries = errors.New("no matching entries in passwd file") + // ErrNoGroupEntries is returned if no matching entries were found in /etc/group. + ErrNoGroupEntries = errors.New("no matching entries in group file") + // ErrRange is returned if a UID or GID is outside of the valid range. + ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID) +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +// SubID represents an entry in /etc/sub{u,g}id +type SubID struct { + Name string + SubID int64 + Count int64 +} + +// IDMap represents an entry in /proc/PID/{u,g}id_map +type IDMap struct { + ID int64 + ParentID int64 + Count int64 +} + +func parseLine(line []byte, v ...interface{}) { + parseParts(bytes.Split(line, []byte(":")), v...) +} + +func parseParts(parts [][]byte, v ...interface{}) { + if len(parts) == 0 { + return + } + + for i, p := range parts { + // Ignore cases where we don't have enough fields to populate the arguments. + // Some configuration files like to misbehave. + if len(v) <= i { + break + } + + // Use the type of the argument to figure out how to parse it, scanf() style. + // This is legit. + switch e := v[i].(type) { + case *string: + *e = string(p) + case *int: + // "numbers", with conversion errors ignored because of some misbehaving configuration files. + *e, _ = strconv.Atoi(string(p)) + case *int64: + *e, _ = strconv.ParseInt(string(p), 10, 64) + case *[]string: + // Comma-separated lists. + if len(p) != 0 { + *e = strings.Split(string(p), ",") + } else { + *e = []string{} + } + default: + // Someone goof'd when writing code using this function. Scream so they can hear us. + panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments!
%#v is not a pointer!", e)) + } + } +} + +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswd(passwd) +} + +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, errors.New("nil source for passwd-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []User{} + ) + + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := User{} + parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + + defer group.Close() + return ParseGroup(group) +} + +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, errors.New("nil source for group-formatted data") + } + rd := bufio.NewReader(r) + out := []Group{} + + // Read the file line-by-line. + for { + var ( + isPrefix bool + wholeLine []byte + err error + ) + + // Read the next line. We do so in chunks (as much as reader's + // buffer is able to keep), check if we read enough columns + // already on each step and store final result in wholeLine. + for { + var line []byte + line, isPrefix, err = rd.ReadLine() + + if err != nil { + // We should return no error if EOF is reached + // without a match. + if err == io.EOF { + err = nil + } + return out, err + } + + // Simple common case: line is short enough to fit in a + // single reader's buffer. + if !isPrefix && len(wholeLine) == 0 { + wholeLine = line + break + } + + wholeLine = append(wholeLine, line...) + + // Check if we read the whole line already. + if !isPrefix { + break + } + } + + // There's no spec for /etc/passwd or /etc/group, but we try to follow + // the same rules as the glibc parser, which allows comments and blank + // space at the beginning of a line. + wholeLine = bytes.TrimSpace(wholeLine) + if len(wholeLine) == 0 || wholeLine[0] == '#' { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := Group{} + parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List) + + if filter == nil || filter(p) { + out = append(out, p) + } + } +} + +type ExecUser struct { + Uid int + Gid int + Sgids []int + Home string +} + +// GetExecUserPath is a wrapper for GetExecUser. 
It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. +func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + var passwd, group io.Reader + + if passwdFile, err := os.Open(passwdPath); err == nil { + passwd = passwdFile + defer passwdFile.Close() + } + + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// - "" +// - "user" +// - "uid" +// - "user:group" +// - "uid:gid +// - "user:gid" +// - "uid:group" +// +// It should be noted that if you specify a numeric user or group id, they will +// not be evaluated as usernames (only the metadata will be filled). So attempting +// to parse a user with user.Name = "1337" will produce the user with a UID of +// 1337. +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. + user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. + if user.Sgids == nil { + user.Sgids = []int{} + } + + // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax + var userArg, groupArg string + parseLine([]byte(userSpec), &userArg, &groupArg) + + // Convert userArg and groupArg to be numeric, so we don't have to execute + // Atoi *twice* for each iteration over lines. + uidArg, uidErr := strconv.Atoi(userArg) + gidArg, gidErr := strconv.Atoi(groupArg) + + // Find the matching user. + users, err := ParsePasswdFilter(passwd, func(u User) bool { + if userArg == "" { + // Default to current state of the user. + return u.Uid == user.Uid + } + + if uidErr == nil { + // If the userArg is numeric, always treat it as a UID. + return uidArg == u.Uid + } + + return u.Name == userArg + }) + + // If we can't find the user, we have to bail. + if err != nil && passwd != nil { + if userArg == "" { + userArg = strconv.Itoa(user.Uid) + } + return nil, fmt.Errorf("unable to find user %s: %w", userArg, err) + } + + var matchedUserName string + if len(users) > 0 { + // First match wins, even if there's more than one matching entry. + matchedUserName = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home + } else if userArg != "" { + // If we can't find a user with the given username, the only other valid + // option is if it's a numeric username with no associated entry in passwd. + + if uidErr != nil { + // Not numeric. + return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries) + } + user.Uid = uidArg + + // Must be inside valid uid range. + if user.Uid < minID || user.Uid > maxID { + return nil, ErrRange + } + + // Okay, so it's numeric. We can just roll with this. + } + + // On to the groups. 
If we matched a username, we need to do this because of + // the supplementary group IDs. + if groupArg != "" || matchedUserName != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // If the group argument isn't explicit, we'll just search for it. + if groupArg == "" { + // Check if user is a member of this group. + for _, u := range g.List { + if u == matchedUserName { + return true + } + } + return false + } + + if gidErr == nil { + // If the groupArg is numeric, always treat it as a GID. + return gidArg == g.Gid + } + + return g.Name == groupArg + }) + if err != nil && group != nil { + return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err) + } + + // Only start modifying user.Gid if it is in explicit form. + if groupArg != "" { + if len(groups) > 0 { + // First match wins, even if there's more than one matching entry. + user.Gid = groups[0].Gid + } else { + // If we can't find a group with the given name, the only other valid + // option is if it's a numeric group name with no associated entry in group. + + if gidErr != nil { + // Not numeric. + return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries) + } + user.Gid = gidArg + + // Must be inside valid gid range. + if user.Gid < minID || user.Gid > maxID { + return nil, ErrRange + } + + // Okay, so it's numeric. We can just roll with this. + } + } else if len(groups) > 0 { + // Supplementary group ids only make sense if in the implicit form. + user.Sgids = make([]int, len(groups)) + for i, group := range groups { + user.Sgids[i] = group.Gid + } + } + } + + return user, nil +} + +// GetAdditionalGroups looks up a list of groups by name or group id +// against the given /etc/group formatted data. If a group name cannot +// be found, an error will be returned. If a group id cannot be found, +// or the given group data is nil, the id will be returned as-is +// provided it is in the legal range. +func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + groups := []Group{} + if group != nil { + var err error + groups, err = ParseGroupFilter(group, func(g Group) bool { + for _, ag := range additionalGroups { + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + return true + } + } + return false + }) + if err != nil { + return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err) + } + } + + gidMap := make(map[int]struct{}) + for _, ag := range additionalGroups { + var found bool + for _, g := range groups { + // if we found a matched group either by name or gid, take the + // first matched as correct + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + if _, ok := gidMap[g.Gid]; !ok { + gidMap[g.Gid] = struct{}{} + found = true + break + } + } + } + // we asked for a group but didn't find it. let's check to see + // if we wanted a numeric group + if !found { + gid, err := strconv.ParseInt(ag, 10, 64) + if err != nil { + // Not a numeric ID either. + return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries) + } + // Ensure gid is inside gid range. + if gid < minID || gid > maxID { + return nil, ErrRange + } + gidMap[int(gid)] = struct{}{} + } + } + gids := []int{} + for gid := range gidMap { + gids = append(gids, gid) + } + return gids, nil +} + +// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups +// that opens the groupPath given and gives it as an argument to +// GetAdditionalGroups. 
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + var group io.Reader + + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() + } + return GetAdditionalGroups(additionalGroups, group) +} + +func ParseSubIDFile(path string) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubID(subid) +} + +func ParseSubID(subid io.Reader) ([]SubID, error) { + return ParseSubIDFilter(subid, nil) +} + +func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubIDFilter(subid, filter) +} + +func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { + if r == nil { + return nil, errors.New("nil source for subid-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []SubID{} + ) + + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { + continue + } + + // see: man 5 subuid + p := SubID{} + parseLine(line, &p.Name, &p.SubID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +func ParseIDMapFile(path string) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMap(r) +} + +func ParseIDMap(r io.Reader) ([]IDMap, error) { + return ParseIDMapFilter(r, nil) +} + +func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMapFilter(r, filter) +} + +func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { + if r == nil { + return nil, errors.New("nil source for idmap-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []IDMap{} + ) + + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { + continue + } + + // see: man 7 user_namespaces + p := IDMap{} + parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} diff --git a/vendor/github.com/moby/sys/user/user_fuzzer.go b/vendor/github.com/moby/sys/user/user_fuzzer.go new file mode 100644 index 00000000..e018eae6 --- /dev/null +++ b/vendor/github.com/moby/sys/user/user_fuzzer.go @@ -0,0 +1,43 @@ +//go:build gofuzz +// +build gofuzz + +package user + +import ( + "io" + "strings" +) + +func IsDivisbleBy(n int, divisibleby int) bool { + return (n % divisibleby) == 0 +} + +func FuzzUser(data []byte) int { + if len(data) == 0 { + return -1 + } + if !IsDivisbleBy(len(data), 5) { + return -1 + } + + var divided [][]byte + + chunkSize := len(data) / 5 + + for i := 0; i < len(data); i += chunkSize { + end := i + chunkSize + + divided = append(divided, data[i:end]) + } + + _, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil) + + var passwd, group io.Reader + + group = strings.NewReader(string(divided[1])) + _, _ = GetAdditionalGroups([]string{string(divided[2])}, group) + + passwd = strings.NewReader(string(divided[3])) + _, _ = GetExecUser(string(divided[4]), nil, passwd, group) + return 1 +} diff --git a/vendor/github.com/opencontainers/go-digest/digestset/set.go 
b/vendor/github.com/opencontainers/go-digest/digestset/set.go deleted file mode 100644 index 71f24184..00000000 --- a/vendor/github.com/opencontainers/go-digest/digestset/set.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2020, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digestset - -import ( - "errors" - "sort" - "strings" - "sync" - - digest "github.com/opencontainers/go-digest" -) - -var ( - // ErrDigestNotFound is used when a matching digest - // could not be found in a set. - ErrDigestNotFound = errors.New("digest not found") - - // ErrDigestAmbiguous is used when multiple digests - // are found in a set. None of the matching digests - // should be considered valid matches. - ErrDigestAmbiguous = errors.New("ambiguous digest string") -) - -// Set is used to hold a unique set of digests which -// may be easily referenced by easily referenced by a string -// representation of the digest as well as short representation. -// The uniqueness of the short representation is based on other -// digests in the set. If digests are omitted from this set, -// collisions in a larger set may not be detected, therefore it -// is important to always do short representation lookups on -// the complete set of digests. To mitigate collisions, an -// appropriately long short code should be used. -type Set struct { - mutex sync.RWMutex - entries digestEntries -} - -// NewSet creates an empty set of digests -// which may have digests added. -func NewSet() *Set { - return &Set{ - entries: digestEntries{}, - } -} - -// checkShortMatch checks whether two digests match as either whole -// values or short values. This function does not test equality, -// rather whether the second value could match against the first -// value. -func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value. 
-func (dst *Set) Lookup(d string) (digest.Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg digest.Algorithm - hex string - ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []digest.Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. 
-func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg digest.Algorithm - val string - digest digest.Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 892ba3de..ce8313e7 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -21,12 +21,20 @@ const ( // MediaTypeLayoutHeader specifies the media type for the oci-layout. MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + // MediaTypeImageManifest specifies the media type for an image manifest. MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" - // MediaTypeImageIndex specifies the media type for an image index. - MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + // MediaTypeImageConfig specifies the media type for the image configuration. + MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value "{}". + MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" +) +const ( // MediaTypeImageLayer is the media type used for layers referenced by the manifest. MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" @@ -37,7 +45,15 @@ const ( // MediaTypeImageLayerZstd is the media type used for zstd compressed // layers referenced by the manifest. MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" +) +// Non-distributable layer media-types. +// +// Deprecated: Non-distributable layers are deprecated, and not recommended +// for future use. Implementations SHOULD NOT produce new non-distributable +// layers. +// https://github.com/opencontainers/image-spec/pull/965 +const ( // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. // @@ -66,10 +82,4 @@ const ( // layers. // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" - - // MediaTypeImageConfig specifies the media type for the image configuration. 
- MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - - // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value `{}` - MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 11e09b58..7069ae44 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc.5" + VersionDev = "" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 4e7717d5..d1236ba7 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -187,6 +187,10 @@ type Hook struct { type Hooks struct { // Prestart is Deprecated. Prestart is a list of hooks to be run before the container process is executed. // It is called in the Runtime Namespace + // + // Deprecated: use [Hooks.CreateRuntime], [Hooks.CreateContainer], and + // [Hooks.StartContainer] instead, which allow more granular hook control + // during the create and start phase. Prestart []Hook `json:"prestart,omitempty"` // CreateRuntime is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called // It is called in the Runtime Namespace @@ -371,6 +375,12 @@ type LinuxMemory struct { // Total memory limit (memory + swap). Swap *int64 `json:"swap,omitempty"` // Kernel memory limit (in bytes). + // + // Deprecated: kernel-memory limits are not supported in cgroups v2, and + // were obsoleted in [kernel v5.4]. This field should no longer be used, + // as it may be ignored by runtimes. + // + // [kernel v5.4]: https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0 Kernel *int64 `json:"kernel,omitempty"` // Kernel memory limit for tcp (in bytes) KernelTCP *int64 `json:"kernelTCP,omitempty"` diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index 35358c2c..503971e0 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 1 + VersionMinor = 2 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "+dev" + VersionDev = "" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go new file mode 100644 index 00000000..d1660796 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go @@ -0,0 +1,211 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ed25519" + "crypto/rand" + "errors" + "fmt" + "io" + + "github.com/sigstore/sigstore/pkg/signature/options" +) + +var ed25519phSupportedHashFuncs = []crypto.Hash{ + crypto.SHA512, +} + +// ED25519phSigner is a signature.Signer that uses the Ed25519 public-key signature system with pre-hashing +type ED25519phSigner struct { + priv ed25519.PrivateKey +} + +// LoadED25519phSigner calculates signatures using the specified private key. +func LoadED25519phSigner(priv ed25519.PrivateKey) (*ED25519phSigner, error) { + if priv == nil { + return nil, errors.New("invalid ED25519 private key specified") + } + + return &ED25519phSigner{ + priv: priv, + }, nil +} + +// ToED25519SignerVerifier creates a ED25519SignerVerifier from a ED25519phSignerVerifier +// +// Clients that use ED25519phSignerVerifier should use this method to get a +// SignerVerifier that uses the same ED25519 private key, but with the Pure +// Ed25519 algorithm. This might be necessary to interact with Fulcio, which +// only supports the Pure Ed25519 algorithm. +func (e ED25519phSignerVerifier) ToED25519SignerVerifier() (*ED25519SignerVerifier, error) { + return LoadED25519SignerVerifier(e.priv) +} + +// SignMessage signs the provided message. If the message is provided, +// this method will compute the digest according to the hash function specified +// when the ED25519phSigner was created. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e ED25519phSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) { + digest, _, err := ComputeDigestForSigning(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + + return e.priv.Sign(nil, digest, crypto.SHA512) +} + +// Public returns the public key that can be used to verify signatures created by +// this signer. +func (e ED25519phSigner) Public() crypto.PublicKey { + if e.priv == nil { + return nil + } + + return e.priv.Public() +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ED25519phSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.Public(), nil +} + +// Sign computes the signature for the specified message; the first and third arguments to this +// function are ignored as they are not used by the ED25519ph algorithm. +func (e ED25519phSigner) Sign(_ io.Reader, digest []byte, _ crypto.SignerOpts) ([]byte, error) { + return e.SignMessage(nil, options.WithDigest(digest)) +} + +// ED25519phVerifier is a signature.Verifier that uses the Ed25519 public-key signature system +type ED25519phVerifier struct { + publicKey ed25519.PublicKey +} + +// LoadED25519phVerifier returns a Verifier that verifies signatures using the +// specified ED25519 public key. 
+func LoadED25519phVerifier(pub ed25519.PublicKey) (*ED25519phVerifier, error) { + if pub == nil { + return nil, errors.New("invalid ED25519 public key specified") + } + + return &ED25519phVerifier{ + publicKey: pub, + }, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e *ED25519phVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} + +// VerifySignature verifies the signature for the given message. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the ED25519phVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e *ED25519phVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + digest, _, err := ComputeDigestForVerifying(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...) + if err != nil { + return err + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + if err := ed25519.VerifyWithOptions(e.publicKey, digest, sigBytes, &ed25519.Options{Hash: crypto.SHA512}); err != nil { + return fmt.Errorf("failed to verify signature: %w", err) + } + return nil +} + +// ED25519phSignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system +type ED25519phSignerVerifier struct { + *ED25519phSigner + *ED25519phVerifier +} + +// LoadED25519phSignerVerifier creates a combined signer and verifier. This is +// a convenience object that simply wraps an instance of ED25519phSigner and ED25519phVerifier. +func LoadED25519phSignerVerifier(priv ed25519.PrivateKey) (*ED25519phSignerVerifier, error) { + signer, err := LoadED25519phSigner(priv) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + pub, ok := priv.Public().(ed25519.PublicKey) + if !ok { + return nil, fmt.Errorf("given key is not ed25519.PublicKey") + } + verifier, err := LoadED25519phVerifier(pub) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &ED25519phSignerVerifier{ + ED25519phSigner: signer, + ED25519phVerifier: verifier, + }, nil +} + +// NewDefaultED25519phSignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using crypto/rand as an entropy source. +func NewDefaultED25519phSignerVerifier() (*ED25519phSignerVerifier, ed25519.PrivateKey, error) { + return NewED25519phSignerVerifier(rand.Reader) +} + +// NewED25519phSignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using the specified entropy source. +func NewED25519phSignerVerifier(rand io.Reader) (*ED25519phSignerVerifier, ed25519.PrivateKey, error) { + _, priv, err := ed25519.GenerateKey(rand) + if err != nil { + return nil, nil, err + } + + sv, err := LoadED25519phSignerVerifier(priv) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. 
As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ED25519phSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go index 0be699f7..e17e768c 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/options.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go @@ -18,6 +18,7 @@ package signature import ( "context" "crypto" + "crypto/rsa" "io" "github.com/sigstore/sigstore/pkg/signature/options" @@ -55,3 +56,10 @@ type VerifyOption interface { RPCOption MessageOption } + +// LoadOption specifies options to be used when creating a Signer/Verifier +type LoadOption interface { + ApplyHash(*crypto.Hash) + ApplyED25519ph(*bool) + ApplyRSAPSS(**rsa.PSSOptions) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go new file mode 100644 index 00000000..e5f3f011 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go @@ -0,0 +1,76 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package options
+
+import (
+	"crypto"
+	"crypto/rsa"
+)
+
+// RequestHash implements the functional option pattern for setting a Hash
+// function when loading a signer or verifier
+type RequestHash struct {
+	NoOpOptionImpl
+	hashFunc crypto.Hash
+}
+
+// ApplyHash sets the hash as requested by the functional option
+func (r RequestHash) ApplyHash(hash *crypto.Hash) {
+	*hash = r.hashFunc
+}
+
+// WithHash specifies that the given hash function should be used when loading a signer or verifier
+func WithHash(hash crypto.Hash) RequestHash {
+	return RequestHash{hashFunc: hash}
+}
+
+// RequestED25519ph implements the functional option pattern for specifying
+// that ED25519ph (pre-hashed) should be used when loading a signer or verifier
+// and an ED25519 key is detected
+type RequestED25519ph struct {
+	NoOpOptionImpl
+	useED25519ph bool
+}
+
+// ApplyED25519ph sets the ED25519ph flag as requested by the functional option
+func (r RequestED25519ph) ApplyED25519ph(useED25519ph *bool) {
+	*useED25519ph = r.useED25519ph
+}
+
+// WithED25519ph specifies that the ED25519ph algorithm should be used when an ED25519 key is used
+func WithED25519ph() RequestED25519ph {
+	return RequestED25519ph{useED25519ph: true}
+}
+
+// RequestPSSOptions implements the functional option pattern for specifying
+// that RSA PSS should be used when loading a signer or verifier and an RSA
+// key is detected
+type RequestPSSOptions struct {
+	NoOpOptionImpl
+	opts *rsa.PSSOptions
+}
+
+// ApplyRSAPSS sets the RSAPSS options as requested by the functional option
+func (r RequestPSSOptions) ApplyRSAPSS(opts **rsa.PSSOptions) {
+	*opts = r.opts
+}
+
+// WithRSAPSS specifies that the RSAPSS algorithm should be used when an RSA key is used.
+// Note that the RSA PSSOptions contains a hash algorithm, which will override
+// the hash function specified with WithHash.
+func WithRSAPSS(opts *rsa.PSSOptions) RequestPSSOptions { + return RequestPSSOptions{opts: opts} +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go index c7f1ccb9..0c0e5185 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go @@ -18,6 +18,7 @@ package options import ( "context" "crypto" + "crypto/rsa" "io" ) @@ -47,3 +48,12 @@ func (NoOpOptionImpl) ApplyKeyVersion(_ *string) {} // ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces func (NoOpOptionImpl) ApplyKeyVersionUsed(_ **string) {} + +// ApplyHash is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyHash(_ *crypto.Hash) {} + +// ApplyED25519ph is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyED25519ph(_ *bool) {} + +// ApplyRSAPSS is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRSAPSS(_ **rsa.PSSOptions) {} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go index 3bd3823c..e26def9f 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go @@ -30,6 +30,7 @@ import ( _ "crypto/sha512" "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" // these ensure we have the implementations loaded _ "golang.org/x/crypto/sha3" @@ -59,12 +60,33 @@ func (s SignerOpts) HashFunc() crypto.Hash { // If privateKey is an RSA key, a RSAPKCS1v15Signer will be returned. If a // RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() method directly. func LoadSigner(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Signer, error) { + return LoadSignerWithOpts(privateKey, options.WithHash(hashFunc)) +} + +// LoadSignerWithOpts returns a signature.Signer based on the algorithm of the private key +// provided. +func LoadSignerWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (Signer, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + switch pk := privateKey.(type) { case *rsa.PrivateKey: + if rsaPSSOptions != nil { + return LoadRSAPSSSigner(pk, hashFunc, rsaPSSOptions) + } return LoadRSAPKCS1v15Signer(pk, hashFunc) case *ecdsa.PrivateKey: return LoadECDSASigner(pk, hashFunc) case ed25519.PrivateKey: + if useED25519ph { + return LoadED25519phSigner(pk) + } return LoadED25519Signer(pk) } return nil, errors.New("unsupported public key type") @@ -87,3 +109,17 @@ func LoadSignerFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.Pas } return LoadSigner(priv, hashFunc) } + +// LoadSignerFromPEMFileWithOpts returns a signature.Signer based on the algorithm of the private key +// in the file. The Signer will use the hash function specified in the options when computing digests. 
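Usage sketch for the LoadOption plumbing added above (illustrative only, not part of the vendored diff; it assumes a caller-supplied *rsa.PrivateKey and uses only functions introduced in this change):

package example

import (
	"bytes"
	"crypto"
	"crypto/rsa"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

// signPSS signs msg with RSA-PSS over SHA-512. Without the WithRSAPSS
// option, LoadSignerVerifierWithOpts falls back to PKCS#1 v1.5 for RSA keys.
func signPSS(priv *rsa.PrivateKey, msg []byte) ([]byte, error) {
	sv, err := signature.LoadSignerVerifierWithOpts(priv,
		options.WithHash(crypto.SHA512),
		options.WithRSAPSS(&rsa.PSSOptions{Hash: crypto.SHA512}),
	)
	if err != nil {
		return nil, err
	}
	return sv.SignMessage(bytes.NewReader(msg))
}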
+func LoadSignerFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (Signer, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerWithOpts(priv, opts...) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go index 90667f2a..70253b12 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go @@ -25,6 +25,7 @@ import ( "path/filepath" "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" ) // SignerVerifier creates and verifies digital signatures over a message using a specified key pair @@ -39,12 +40,33 @@ type SignerVerifier interface { // If privateKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a // RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() method directly. func LoadSignerVerifier(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (SignerVerifier, error) { + return LoadSignerVerifierWithOpts(privateKey, options.WithHash(hashFunc)) +} + +// LoadSignerVerifierWithOpts returns a signature.SignerVerifier based on the +// algorithm of the private key provided and the user's choice. +func LoadSignerVerifierWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (SignerVerifier, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + switch pk := privateKey.(type) { case *rsa.PrivateKey: + if rsaPSSOptions != nil { + return LoadRSAPSSSignerVerifier(pk, hashFunc, rsaPSSOptions) + } return LoadRSAPKCS1v15SignerVerifier(pk, hashFunc) case *ecdsa.PrivateKey: return LoadECDSASignerVerifier(pk, hashFunc) case ed25519.PrivateKey: + if useED25519ph { + return LoadED25519phSignerVerifier(pk) + } return LoadED25519SignerVerifier(pk) } return nil, errors.New("unsupported public key type") @@ -67,3 +89,17 @@ func LoadSignerVerifierFromPEMFile(path string, hashFunc crypto.Hash, pf cryptou } return LoadSignerVerifier(priv, hashFunc) } + +// LoadSignerVerifierFromPEMFileWithOpts returns a signature.SignerVerifier based on the algorithm of the private key +// in the file. The SignerVerifier will use the hash function specified in the options when computing digests. +func LoadSignerVerifierFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (SignerVerifier, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerVerifierWithOpts(priv, opts...) 
+} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go index 9ca60492..cdde9fc5 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go @@ -26,6 +26,7 @@ import ( "path/filepath" "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" ) // Verifier verifies the digital signature using a specified public key @@ -40,12 +41,33 @@ type Verifier interface { // If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a // RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly. func LoadVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (Verifier, error) { + return LoadVerifierWithOpts(publicKey, options.WithHash(hashFunc)) +} + +// LoadVerifierWithOpts returns a signature.Verifier based on the algorithm of the public key +// provided that will use the hash function specified when computing digests. +func LoadVerifierWithOpts(publicKey crypto.PublicKey, opts ...LoadOption) (Verifier, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + switch pk := publicKey.(type) { case *rsa.PublicKey: + if rsaPSSOptions != nil { + return LoadRSAPSSVerifier(pk, hashFunc, rsaPSSOptions) + } return LoadRSAPKCS1v15Verifier(pk, hashFunc) case *ecdsa.PublicKey: return LoadECDSAVerifier(pk, hashFunc) case ed25519.PublicKey: + if useED25519ph { + return LoadED25519phVerifier(pk) + } return LoadED25519Verifier(pk) } return nil, errors.New("unsupported public key type") @@ -98,3 +120,19 @@ func LoadVerifierFromPEMFile(path string, hashFunc crypto.Hash) (Verifier, error return LoadVerifier(pubKey, hashFunc) } + +// LoadVerifierFromPEMFileWithOpts returns a signature.Verifier based on the contents of a +// file located at path. The Verifier wil use the hash function specified in the options when computing digests. +func LoadVerifierFromPEMFileWithOpts(path string, opts ...LoadOption) (Verifier, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes) + if err != nil { + return nil, err + } + + return LoadVerifierWithOpts(pubKey, opts...) 
+} diff --git a/vendor/github.com/vbauerster/mpb/v8/README.md b/vendor/github.com/vbauerster/mpb/v8/README.md index 09825ca0..af97c92a 100644 --- a/vendor/github.com/vbauerster/mpb/v8/README.md +++ b/vendor/github.com/vbauerster/mpb/v8/README.md @@ -42,11 +42,9 @@ func main() { mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟"), mpb.PrependDecorators( // display our name with one space on the right - decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), + decor.Name(name, decor.WC{C: decor.DindentRight | decor.DextraSpace}), // replace ETA decorator with "done" message, OnComplete event - decor.OnComplete( - decor.AverageETA(decor.ET_STYLE_GO, decor.WC{W: 4}), "done", - ), + decor.OnComplete(decor.AverageETA(decor.ET_STYLE_GO), "done"), ), mpb.AppendDecorators(decor.Percentage()), ) diff --git a/vendor/github.com/vbauerster/mpb/v8/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go index 5f814121..bca79829 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -305,9 +305,6 @@ func (b *Bar) EwmaIncrBy(n int, iterDur time.Duration) { // EwmaIncrInt64 increments progress by amount of n and updates EWMA based // decorators by dur of a single iteration. func (b *Bar) EwmaIncrInt64(n int64, iterDur time.Duration) { - if n <= 0 { - return - } select { case b.operateState <- func(s *bState) { s.decoratorEwmaUpdate(n, iterDur) @@ -429,13 +426,11 @@ func (b *Bar) render(tw int) { return } } - frame := &renderFrame{ - rows: rows, - shutdown: s.shutdown, - rmOnComplete: s.rmOnComplete, - noPop: s.noPop, - } + frame := &renderFrame{rows: rows} if s.completed || s.aborted { + frame.shutdown = s.shutdown + frame.rmOnComplete = s.rmOnComplete + frame.noPop = s.noPop // post increment makes sure OnComplete decorators are rendered s.shutdown++ } @@ -460,12 +455,15 @@ func (b *Bar) triggerCompletion(s *bState) { } func (b *Bar) tryEarlyRefresh(renderReq chan<- time.Time) { - var anyOtherRunning bool + var otherRunning int b.container.traverseBars(func(bar *Bar) bool { - anyOtherRunning = b != bar && bar.IsRunning() - return anyOtherRunning + if b != bar && bar.IsRunning() { + otherRunning++ + return false // stop traverse + } + return true // continue traverse }) - if !anyOtherRunning { + if otherRunning == 0 { for { select { case renderReq <- time.Now(): diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go index f537d3f7..31062ebd 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go @@ -8,29 +8,27 @@ import ( ) const ( - // DidentRight bit specifies identation direction. + // DindentRight sets indentation from right to left. // - // |foo |b | With DidentRight - // | foo| b| Without DidentRight - DidentRight = 1 << iota + // |foo |b | DindentRight is set + // | foo| b| DindentRight is not set + DindentRight = 1 << iota - // DextraSpace bit adds extra space, makes sense with DSyncWidth only. - // When DidentRight bit set, the space will be added to the right, - // otherwise to the left. + // DextraSpace bit adds extra indentation space. DextraSpace // DSyncWidth bit enables same column width synchronization. // Effective with multiple bars only. 
DSyncWidth - // DSyncWidthR is shortcut for DSyncWidth|DidentRight - DSyncWidthR = DSyncWidth | DidentRight + // DSyncWidthR is shortcut for DSyncWidth|DindentRight + DSyncWidthR = DSyncWidth | DindentRight // DSyncSpace is shortcut for DSyncWidth|DextraSpace DSyncSpace = DSyncWidth | DextraSpace - // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DidentRight - DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight + // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DindentRight + DSyncSpaceR = DSyncWidth | DextraSpace | DindentRight ) // TimeStyle enum. @@ -143,11 +141,10 @@ func (wc WC) Format(str string) (string, int) { viewWidth := runewidth.StringWidth(str) if wc.W > viewWidth { viewWidth = wc.W + } else if (wc.C & DextraSpace) != 0 { + viewWidth++ } if (wc.C & DSyncWidth) != 0 { - if (wc.C & DextraSpace) != 0 { - viewWidth++ - } wc.wsync <- viewWidth viewWidth = <-wc.wsync } @@ -156,7 +153,7 @@ func (wc WC) Format(str string) (string, int) { // Init initializes width related config. func (wc *WC) Init() WC { - if (wc.C & DidentRight) != 0 { + if (wc.C & DindentRight) != 0 { wc.fill = runewidth.FillRight } else { wc.fill = runewidth.FillLeft diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go index ecb6f8f9..ecf87b18 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -54,18 +54,19 @@ func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { func MovingAverageETA(style TimeStyle, average ewma.MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator { d := &movingAverageETA{ WC: initWC(wcc...), + producer: chooseTimeProducer(style), average: average, normalizer: normalizer, - producer: chooseTimeProducer(style), } return d } type movingAverageETA struct { WC + producer func(time.Duration) string average ewma.MovingAverage normalizer TimeNormalizer - producer func(time.Duration) string + zDur time.Duration } func (d *movingAverageETA) Decor(s Statistics) (string, int) { @@ -78,11 +79,17 @@ func (d *movingAverageETA) Decor(s Statistics) (string, int) { } func (d *movingAverageETA) EwmaUpdate(n int64, dur time.Duration) { - durPerItem := float64(dur) / float64(n) - if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) { - return + if n <= 0 { + d.zDur += dur + } else { + durPerItem := float64(d.zDur+dur) / float64(n) + if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) { + d.zDur += dur + return + } + d.zDur = 0 + d.average.Add(durPerItem) } - d.average.Add(durPerItem) } // AverageETA decorator. It's wrapper of NewAverageETA. 
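The eta.go hunk above and the speed.go hunk below apply the same EWMA fix: an iteration that advances the bar by zero now banks its duration in zDur and folds it into the next nonzero sample instead of being dropped, matching the removal of the n <= 0 guard in bar.go's EwmaIncrInt64. A minimal caller-side sketch (illustrative only; copyWithEwma is a hypothetical helper and bar is assumed to be built with EWMA-based decorators):

package example

import (
	"io"
	"time"

	"github.com/vbauerster/mpb/v8"
)

// copyWithEwma copies src to dst while feeding per-iteration timings to the
// bar's EWMA decorators.
func copyWithEwma(dst io.Writer, src io.Reader, bar *mpb.Bar) error {
	buf := make([]byte, 32*1024)
	for {
		start := time.Now()
		n, err := src.Read(buf)
		if n > 0 {
			if _, werr := dst.Write(buf[:n]); werr != nil {
				return werr
			}
		}
		// With this patch, a zero-byte read no longer loses its elapsed
		// time: EwmaIncrBy forwards n == 0 and the decorator accumulates
		// the duration until the next real increment.
		bar.EwmaIncrBy(n, time.Since(start))
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}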
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go index 5879d060..f3355c42 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go @@ -69,8 +69,8 @@ func EwmaSpeed(unit interface{}, format string, age float64, wcc ...WC) Decorato func MovingAverageSpeed(unit interface{}, format string, average ewma.MovingAverage, wcc ...WC) Decorator { d := &movingAverageSpeed{ WC: initWC(wcc...), - average: average, producer: chooseSpeedProducer(unit, format), + average: average, } return d } @@ -79,26 +79,32 @@ type movingAverageSpeed struct { WC producer func(float64) string average ewma.MovingAverage - msg string + zDur time.Duration } func (d *movingAverageSpeed) Decor(s Statistics) (string, int) { - if !s.Completed { - var speed float64 - if v := d.average.Value(); v > 0 { - speed = 1 / v - } - d.msg = d.producer(speed * 1e9) + var str string + // ewma implementation may return 0 before accumulating certain number of samples + if v := d.average.Value(); v != 0 { + str = d.producer(1e9 / v) + } else { + str = d.producer(0) } - return d.Format(d.msg) + return d.Format(str) } func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) { - durPerByte := float64(dur) / float64(n) - if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) { - return + if n <= 0 { + d.zDur += dur + } else { + durPerByte := float64(d.zDur+dur) / float64(n) + if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) { + d.zDur += dur + return + } + d.zDur = 0 + d.average.Add(durPerByte) } - d.average.Add(durPerByte) } // AverageSpeed decorator with dynamic unit measure adjustment. It's diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go index 8c609b46..3bdc75b4 100644 --- a/vendor/github.com/vbauerster/mpb/v8/progress.go +++ b/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -74,8 +74,8 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { dropS: make(chan struct{}), dropD: make(chan struct{}), renderReq: make(chan time.Time), - refreshRate: defaultRefreshRate, popPriority: math.MinInt32, + refreshRate: defaultRefreshRate, queueBars: make(map[*Bar]*Bar), output: os.Stdout, debugOut: io.Discard, @@ -191,7 +191,7 @@ func (p *Progress) traverseBars(cb func(b *Bar) bool) { select { case p.operateState <- func(s *pState) { s.hm.iter(iter, drop) }: for b := range iter { - if cb(b) { + if !cb(b) { close(drop) break } @@ -258,34 +258,58 @@ func (p *Progress) Shutdown() { func (p *Progress) serve(s *pState, cw *cwriter.Writer) { defer p.pwg.Done() - render := func() error { return s.render(cw) } var err error + var w *cwriter.Writer + renderReq := s.renderReq + operateState := p.operateState + interceptIO := p.interceptIO + + if s.delayRC != nil { + w = cwriter.New(io.Discard) + } else { + w, cw = cw, nil + } for { select { - case op := <-p.operateState: + case <-s.delayRC: + w, cw = cw, nil + s.delayRC = nil + case op := <-operateState: op(s) - case fn := <-p.interceptIO: - fn(cw) - case <-s.renderReq: - e := render() - if e != nil { + case fn := <-interceptIO: + fn(w) + case <-renderReq: + err = s.render(w) + if err != nil { + // (*pState).(autoRefreshListener|manualRefreshListener) may block + // if not launching following short lived goroutine + go func() { + for { + select { + case <-s.renderReq: + case <-p.done: + return + } + } + }() p.cancel() // cancel all bars - render = func() error { return nil } - 
err = e + renderReq = nil + operateState = nil + interceptIO = nil } case <-p.done: - update := make(chan bool) - for s.autoRefresh && err == nil { - s.hm.state(update) - if <-update { - err = render() - } else { - break - } - } if err != nil { _, _ = fmt.Fprintln(s.debugOut, err.Error()) + } else if s.autoRefresh { + update := make(chan bool) + for i := 0; i == 0 || <-update; i++ { + if err := s.render(w); err != nil { + _, _ = fmt.Fprintln(s.debugOut, err.Error()) + break + } + s.hm.state(update) + } } s.hm.end(s.shutdownNotifier) return @@ -293,10 +317,7 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) { } } -func (s pState) autoRefreshListener(done chan struct{}) { - if s.delayRC != nil { - <-s.delayRC - } +func (s *pState) autoRefreshListener(done chan struct{}) { ticker := time.NewTicker(s.refreshRate) defer ticker.Stop() for { @@ -310,7 +331,7 @@ func (s pState) autoRefreshListener(done chan struct{}) { } } -func (s pState) manualRefreshListener(done chan struct{}) { +func (s *pState) manualRefreshListener(done chan struct{}) { for { select { case x := <-s.manualRC: @@ -342,9 +363,9 @@ func (s *pState) render(cw *cwriter.Writer) (err error) { if s.reqWidth > 0 { width = s.reqWidth } else { - width = 100 + width = 80 } - height = 100 + height = width } for b := range iter { @@ -420,7 +441,7 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error { return cw.Flush(len(rows) - popCount) } -func (s pState) push(wg *sync.WaitGroup, b *Bar, sync bool) { +func (s *pState) push(wg *sync.WaitGroup, b *Bar, sync bool) { s.hm.push(b, sync) wg.Done() } diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go index 054467af..f3533d34 100644 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -162,12 +162,17 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { return nil, ErrBucketNameRequired } + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if there is an existing key. - if bytes.Equal(key, k) { + if bytes.Equal(newKey, k) { if (flags & bucketLeafFlag) != 0 { return nil, ErrBucketExists } @@ -182,16 +187,14 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { } var value = bucket.write() - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) + c.node().put(newKey, newKey, value, 0, bucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket // to be treated as a regular, non-inline bucket for the rest of the tx. b.page = nil - return b.Bucket(key), nil + return b.Bucket(newKey), nil } // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. @@ -288,18 +291,23 @@ func (b *Bucket) Put(key []byte, value []byte) error { return ErrValueTooLarge } + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + // Move cursor to correct position. 
c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + if bytes.Equal(newKey, k) && (flags&bucketLeafFlag) != 0 { return ErrIncompatibleValue } - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) + // gofail: var beforeBucketPut struct{} + + c.node().put(newKey, newKey, value, 0, 0) return nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go index 4e24f9ee..652aa48b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go @@ -14,17 +14,22 @@ import ( ) // ArrayCodec is the Codec used for bsoncore.Array values. +// +// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0. type ArrayCodec struct{} var defaultArrayCodec = NewArrayCodec() // NewArrayCodec returns an ArrayCodec. +// +// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See +// [ArrayCodec] for more details. func NewArrayCodec() *ArrayCodec { return &ArrayCodec{} } // EncodeValue is the ValueEncoder for bsoncore.Array values. -func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCoreArray { return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} } @@ -34,7 +39,7 @@ func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val r } // DecodeValue is the ValueDecoder for bsoncore.Array values. -func (ac *ArrayCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCoreArray { return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go index 098ed69f..0693bd43 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -23,6 +23,8 @@ var ( // Marshaler is an interface implemented by types that can marshal themselves // into a BSON document represented as bytes. The bytes returned must be a valid // BSON document if the error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. type Marshaler interface { MarshalBSON() ([]byte, error) } @@ -31,6 +33,8 @@ type Marshaler interface { // themselves into a BSON value as bytes. The type must be the valid type for // the bytes returned. The bytes and byte type together must be valid if the // error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. type ValueMarshaler interface { MarshalBSONValue() (bsontype.Type, []byte, error) } @@ -39,6 +43,8 @@ type ValueMarshaler interface { // document representation of themselves. The BSON bytes can be assumed to be // valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data // after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. 
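[editor's note: illustrative Go sketch, not part of the patch]
The bbolt hunks above move the key copy ahead of the cursor seek and keep it in a fresh newKey variable. As the new comment explains, reusing the caller's key would mark it as leaking and force callers' key buffers onto the heap; copying into newKey also means the B+tree node retains a stable slice the caller cannot later mutate. A sketch of the helper's assumed shape and the defensive-copy effect:

    package main

    import "fmt"

    // cloneBytes mirrors the helper bbolt uses (assumed shape): a plain
    // defensive copy, so the stored key no longer aliases the caller's
    // buffer.
    func cloneBytes(v []byte) []byte {
    	clone := make([]byte, len(v))
    	copy(clone, v)
    	return clone
    }

    func main() {
    	key := []byte("user:1")
    	stored := cloneBytes(key)
    	key[0] = 'X'               // caller reuses its buffer...
    	fmt.Printf("%s\n", stored) // ...but the stored copy still reads user:1
    }
[end note]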
type Unmarshaler interface { UnmarshalBSON([]byte) error } @@ -47,6 +53,8 @@ type Unmarshaler interface { // BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. type ValueUnmarshaler interface { UnmarshalBSONValue(bsontype.Type, []byte) error } @@ -111,13 +119,93 @@ func (vde ValueDecoderError) Error() string { // value. type EncodeContext struct { *Registry + + // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) + // that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize instead. MinSize bool + + errorOnInlineDuplicates bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool +} + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. +func (ec *EncodeContext) ErrorOnInlineDuplicates() { + ec.errorOnInlineDuplicates = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprintf() instead of the default string conversion logic. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. +func (ec *EncodeContext) StringifyMapKeysWithFmt() { + ec.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. +func (ec *EncodeContext) NilMapAsEmpty() { + ec.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. +func (ec *EncodeContext) NilSliceAsEmpty() { + ec.nilSliceAsEmpty = true +} + +// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values +// instead of BSON null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. +func (ec *EncodeContext) NilByteSliceAsEmpty() { + ec.nilByteSliceAsEmpty = true +} + +// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) +// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. +// +// Note that the Encoder only examines exported struct fields when determining if a struct is the +// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. +func (ec *EncodeContext) OmitZeroStruct() { + ec.omitZeroStruct = true +} + +// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead. 
+func (ec *EncodeContext) UseJSONStructTags() { + ec.useJSONStructTags = true } // DecodeContext is the contextual information required for a Codec to decode a // value. type DecodeContext struct { *Registry + + // Truncate, if true, instructs decoders to to truncate the fractional part of BSON "double" + // values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to + // BSON "decimal128" values. + // + // Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead. Truncate bool // Ancestor is the type of a containing document. This is mainly used to determine what type @@ -125,7 +213,7 @@ type DecodeContext struct { // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface // will be decoded into a bson.M. // - // Deprecated: Use DefaultDocumentM or DefaultDocumentD instead. + // Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead. Ancestor reflect.Type // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the @@ -133,22 +221,74 @@ type DecodeContext struct { // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an // error. DocumentType overrides the Ancestor field. defaultDocumentType reflect.Type + + binaryAsSlice bool + useJSONStructTags bool + useLocalTimeZone bool + zeroMaps bool + zeroStructs bool } -// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or +// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. +func (dc *DecodeContext) BinaryAsSlice() { + dc.binaryAsSlice = true +} + +// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. +func (dc *DecodeContext) UseJSONStructTags() { + dc.useJSONStructTags = true +} + +// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead +// of the UTC timezone. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. +func (dc *DecodeContext) UseLocalTimeZone() { + dc.useLocalTimeZone = true +} + +// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value +// passed to Decode before unmarshaling BSON documents into them. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. +func (dc *DecodeContext) ZeroMaps() { + dc.zeroMaps = true +} + +// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination +// value passed to Decode before unmarshaling BSON documents into them. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. +func (dc *DecodeContext) ZeroStructs() { + dc.zeroStructs = true +} + +// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". 
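[editor's note: illustrative Go sketch, not part of the patch]
Every EncodeContext field and setter added above is immediately marked Deprecated in favour of configuration methods on bson.Encoder (or options.BSONOptions for a mongo.Client). A minimal sketch of the replacement style, assuming the Go Driver 1.x Encoder methods the deprecation notices reference:

    package main

    import (
    	"bytes"
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func main() {
    	buf := new(bytes.Buffer)
    	vw, err := bsonrw.NewBSONValueWriter(buf)
    	if err != nil {
    		panic(err)
    	}
    	enc, err := bson.NewEncoder(vw)
    	if err != nil {
    		panic(err)
    	}
    	enc.IntMinSize()    // replaces the EncodeContext.MinSize flag above
    	enc.NilMapAsEmpty() // replaces the nilMapAsEmpty flag above
    	if err := enc.Encode(map[string]interface{}{"n": int64(1)}); err != nil {
    		panic(err)
    	}
    	fmt.Println(buf.Len(), "bytes of BSON")
    }
[end note]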
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead. func (dc *DecodeContext) DefaultDocumentM() { dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) } -// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead. func (dc *DecodeContext) DefaultDocumentD() { dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) } -// ValueCodec is the interface that groups the methods to encode and decode +// ValueCodec is an interface for encoding and decoding a reflect.Value. // values. +// +// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead. type ValueCodec interface { ValueEncoder ValueDecoder @@ -233,6 +373,10 @@ func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext // CodecZeroer is the interface implemented by Codecs that can also determine if // a value of the type that would be encoded is zero. +// +// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver +// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to +// nil instead. type CodecZeroer interface { IsTypeZero(interface{}) bool } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go index 5a916cc1..0134b5a9 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go @@ -16,18 +16,45 @@ import ( ) // ByteSliceCodec is the Codec used for []byte values. +// +// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver +// 2.0. To configure the byte slice encode and decode behavior, use the +// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the byte slice +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to encode nil byte slices as empty +// BSON binary values, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilByteSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in ByteSliceCodec for the +// corresponding settings. type ByteSliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values + // instead of BSON null. + // + // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty + // instead. EncodeNilAsEmpty bool } var ( defaultByteSliceCodec = NewByteSliceCodec() - _ ValueCodec = defaultByteSliceCodec + // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultByteSliceCodec ) -// NewByteSliceCodec returns a StringCodec with options opts. +// NewByteSliceCodec returns a ByteSliceCodec with options opts. 
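[editor's note: illustrative Go sketch, not part of the patch]
The DefaultDocumentM/DefaultDocumentD setters above likewise gain bson.Decoder equivalents. A sketch, assuming the Decoder methods named in the deprecation notices: decoding a document into interface{} yields primitive.D by default, or primitive.M once DefaultDocumentM is set.

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func main() {
    	raw, _ := bson.Marshal(bson.M{"a": int32(1)})

    	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
    	if err != nil {
    		panic(err)
    	}
    	dec.DefaultDocumentM() // replaces DecodeContext.DefaultDocumentM above

    	var v interface{}
    	if err := dec.Decode(&v); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%T\n", v) // primitive.M rather than the default primitive.D
    }
[end note]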
+// +// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See +// [ByteSliceCodec] for more details. func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) codec := ByteSliceCodec{} @@ -42,13 +69,13 @@ func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, if !val.IsValid() || val.Type() != tByteSlice { return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} } - if val.IsNil() && !bsc.EncodeNilAsEmpty { + if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { return vw.WriteNull() } return vw.WriteBinary(val.Interface().([]byte)) } -func (bsc *ByteSliceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tByteSlice { return emptyValue, ValueDecoderError{ Name: "ByteSliceDecodeValue", diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go new file mode 100644 index 00000000..844b5029 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go @@ -0,0 +1,166 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// Runtime check that the kind encoder and decoder caches can store any valid +// reflect.Kind constant. 
+func init() { + if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { + panic("The capacity of kindEncoderCache is too small.\n" + + "This is due to a new type being added to reflect.Kind.") + } +} + +// statically assert array size +var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] +var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] + +type typeEncoderCache struct { + cache sync.Map // map[reflect.Type]ValueEncoder +} + +func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { + c.cache.Store(rt, enc) +} + +func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueEncoder), true + } + return nil, false +} + +func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { + if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { + enc = v.(ValueEncoder) + } + return enc +} + +func (c *typeEncoderCache) Clone() *typeEncoderCache { + cc := new(typeEncoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +type typeDecoderCache struct { + cache sync.Map // map[reflect.Type]ValueDecoder +} + +func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { + c.cache.Store(rt, dec) +} + +func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueDecoder), true + } + return nil, false +} + +func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { + if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { + dec = v.(ValueDecoder) + } + return dec +} + +func (c *typeDecoderCache) Clone() *typeDecoderCache { + cc := new(typeDecoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueEncoder interface). +type kindEncoderCacheEntry struct { + enc ValueEncoder +} + +type kindEncoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry +} + +func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) { + if enc != nil && rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc}) + } +} + +func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok { + return ent.enc, ent.enc != nil + } + } + return nil, false +} + +func (c *kindEncoderCache) Clone() *kindEncoderCache { + cc := new(kindEncoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueDecoder interface). 
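[editor's note: illustrative Go sketch, not part of the patch]
Two observations on the new codec_cache.go above. First, the init check works because reflect.Kind.String() prints "kind<N>" for out-of-range values: reflect.Kind(len(entries)).String() equals "kind27" exactly while UnsafePointer (26) is the last kind, and if a future Go release appends a kind, the string becomes that kind's real name and the panic demands the array be resized. Second, typeEncoderCache's Load-then-LoadOrStore shape is the standard sync.Map memoization pattern; a standalone sketch with a string standing in for ValueEncoder:

    package main

    import (
    	"fmt"
    	"reflect"
    	"sync"
    )

    var cache sync.Map // map[reflect.Type]string, a stand-in for ValueEncoder

    // encoderFor memoizes one value per type: Load first for the fast path,
    // then LoadOrStore so that when two goroutines race to build an entry,
    // both end up sharing whichever value was stored first.
    func encoderFor(rt reflect.Type) string {
    	if v, ok := cache.Load(rt); ok {
    		return v.(string)
    	}
    	enc := "encoder for " + rt.String()
    	if v, loaded := cache.LoadOrStore(rt, enc); loaded {
    		return v.(string) // another goroutine won the race
    	}
    	return enc
    }

    func main() {
    	fmt.Println(encoderFor(reflect.TypeOf(42)))
    	fmt.Println(encoderFor(reflect.TypeOf(42))) // cache hit
    }
[end note]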
+type kindDecoderCacheEntry struct { + dec ValueDecoder +} + +type kindDecoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry +} + +func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) { + if rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec}) + } +} + +func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok { + return ent.dec, ent.dec != nil + } + } + return nil, false +} + +func (c *kindDecoderCache) Clone() *kindDecoderCache { + cc := new(kindDecoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index e95cab58..7e08aab3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -24,7 +24,7 @@ import ( var ( defaultValueDecoders DefaultValueDecoders - errCannotTruncate = errors.New("float64 can only be truncated to an integer type when truncation is enabled") + errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled") ) type decodeBinaryError struct { @@ -41,13 +41,16 @@ func newDefaultStructCodec() *StructCodec { if err != nil { // This function is called from the codec registration path, so errors can't be propagated. If there's an error // constructing the StructCodec, we panic to avoid losing it. - panic(fmt.Errorf("error creating default StructCodec: %v", err)) + panic(fmt.Errorf("error creating default StructCodec: %w", err)) } return codec } // DefaultValueDecoders is a namespace type for the default ValueDecoders used // when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. type DefaultValueDecoders struct{} // RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with @@ -56,6 +59,9 @@ type DefaultValueDecoders struct{} // There is no support for decoding map[string]interface{} because there is no decoder for // interface{}, so users must either register this decoder themselves or use the // EmptyInterfaceDecoder available in the bson package. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) @@ -132,6 +138,9 @@ func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { } // DDecodeValue is the ValueDecoderFunc for primitive.D instances. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
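[editor's note: illustrative Go sketch, not part of the patch]
The kindEncoderCacheEntry/kindDecoderCacheEntry wrappers above exist, as their comments say, because atomic.Value requires every Store on the same variable to use one concrete type; storing two different ValueEncoder implementations directly would panic. Wrapping in a pointer-to-struct pins the concrete type. A compact demonstration:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    type codec interface{ name() string }

    type jsonCodec struct{}

    func (jsonCodec) name() string { return "json" }

    type bsonCodec struct{}

    func (bsonCodec) name() string { return "bson" }

    // entry pins the concrete type seen by atomic.Value, mirroring
    // kindEncoderCacheEntry above.
    type entry struct{ c codec }

    func main() {
    	var slot atomic.Value
    	slot.Store(&entry{c: jsonCodec{}})
    	slot.Store(&entry{c: bsonCodec{}}) // fine: concrete type is always *entry
    	fmt.Println(slot.Load().(*entry).c.name())

    	// Storing the codecs directly would panic on the second Store with
    	// "sync/atomic: store of inconsistently typed value into Value":
    	// var bad atomic.Value
    	// bad.Store(jsonCodec{})
    	// bad.Store(bsonCodec{})
    }
[end note]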
func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || !val.CanSet() || val.Type() != tD { return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} @@ -169,7 +178,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe for { key, elemVr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } else if err != nil { return err @@ -188,7 +197,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe return nil } -func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t.Kind() != reflect.Bool { return emptyValue, ValueDecoderError{ Name: "BooleanDecodeValue", @@ -235,6 +244,9 @@ func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw. } // BooleanDecodeValue is the ValueDecoderFunc for bool types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} @@ -333,6 +345,9 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade } // IntDecodeValue is the ValueDecoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() { return ValueDecoderError{ @@ -434,7 +449,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu return nil } -func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { var f float64 var err error switch vrType := vr.Type(); vrType { @@ -477,7 +492,7 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu switch t.Kind() { case reflect.Float32: - if !ec.Truncate && float64(float32(f)) != f { + if !dc.Truncate && float64(float32(f)) != f { return emptyValue, errCannotTruncate } @@ -494,6 +509,9 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu } // FloatDecodeValue is the ValueDecoderFunc for float types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() { return ValueDecoderError{ @@ -515,7 +533,7 @@ func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.Val // StringDecodeValue is the ValueDecoderFunc for string types. // // Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. 
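[editor's note: illustrative Go sketch, not part of the patch]
Two mechanical changes run together through default_value_decoders.go above: fmt.Errorf now wraps with %w instead of %v, and sentinel comparisons such as err == bsonrw.ErrEOD become errors.Is(err, bsonrw.ErrEOD). They belong together: once an error may arrive wrapped, plain equality stops matching the sentinel. A self-contained illustration (errEOD is our stand-in):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errEOD = errors.New("end of document") // stand-in for bsonrw.ErrEOD

    func readElement() error {
    	// Wrapping with %w keeps the sentinel reachable via errors.Is.
    	return fmt.Errorf("reading element: %w", errEOD)
    }

    func main() {
    	err := readElement()
    	fmt.Println(err == errEOD)          // false: equality sees only the wrapper
    	fmt.Println(errors.Is(err, errEOD)) // true: Is walks the wrap chain
    }
[end note]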
-func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { var str string var err error switch vr.Type() { @@ -536,7 +554,7 @@ func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tJavaScript { return emptyValue, ValueDecoderError{ Name: "JavaScriptDecodeValue", @@ -565,6 +583,9 @@ func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.V } // JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tJavaScript { return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} @@ -579,7 +600,7 @@ func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bso return nil } -func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tSymbol { return emptyValue, ValueDecoderError{ Name: "SymbolDecodeValue", @@ -620,6 +641,9 @@ func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.Value } // SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tSymbol { return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} @@ -634,7 +658,7 @@ func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tBinary { return emptyValue, ValueDecoderError{ Name: "BinaryDecodeValue", @@ -664,6 +688,9 @@ func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // BinaryDecodeValue is the ValueDecoderFunc for Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tBinary { return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} @@ -678,7 +705,7 @@ func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tUndefined { return emptyValue, ValueDecoderError{ Name: "UndefinedDecodeValue", @@ -704,6 +731,9 @@ func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.Valu } // UndefinedDecodeValue is the ValueDecoderFunc for Undefined. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tUndefined { return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} @@ -719,7 +749,7 @@ func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw } // Accept both 12-byte string and pretty-printed 24-byte hex string formats. -func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tOID { return emptyValue, ValueDecoderError{ Name: "ObjectIDDecodeValue", @@ -765,6 +795,9 @@ func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.V } // ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tOID { return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} @@ -779,7 +812,7 @@ func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDateTime { return emptyValue, ValueDecoderError{ Name: "DateTimeDecodeValue", @@ -808,6 +841,9 @@ func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.Value } // DateTimeDecodeValue is the ValueDecoderFunc for DateTime. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDateTime { return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} @@ -822,7 +858,7 @@ func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw. 
return nil } -func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tNull { return emptyValue, ValueDecoderError{ Name: "NullDecodeValue", @@ -848,6 +884,9 @@ func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueRead } // NullDecodeValue is the ValueDecoderFunc for Null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tNull { return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} @@ -862,7 +901,7 @@ func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.Valu return nil } -func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tRegex { return emptyValue, ValueDecoderError{ Name: "RegexDecodeValue", @@ -891,6 +930,9 @@ func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueRea } // RegexDecodeValue is the ValueDecoderFunc for Regex. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRegex { return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} @@ -905,7 +947,7 @@ func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.Val return nil } -func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDBPointer { return emptyValue, ValueDecoderError{ Name: "DBPointerDecodeValue", @@ -935,6 +977,9 @@ func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.Valu } // DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDBPointer { return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} @@ -949,7 +994,7 @@ func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw return nil } -func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { if reflectType != tTimestamp { return emptyValue, ValueDecoderError{ Name: "TimestampDecodeValue", @@ -978,6 +1023,9 @@ func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.Valu } // TimestampDecodeValue is the ValueDecoderFunc for Timestamp. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tTimestamp { return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} @@ -992,7 +1040,7 @@ func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw return nil } -func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tMinKey { return emptyValue, ValueDecoderError{ Name: "MinKeyDecodeValue", @@ -1020,6 +1068,9 @@ func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // MinKeyDecodeValue is the ValueDecoderFunc for MinKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tMinKey { return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} @@ -1034,7 +1085,7 @@ func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tMaxKey { return emptyValue, ValueDecoderError{ Name: "MaxKeyDecodeValue", @@ -1062,6 +1113,9 @@ func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tMaxKey { return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} @@ -1076,7 +1130,7 @@ func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDecimal { return emptyValue, ValueDecoderError{ Name: "Decimal128DecodeValue", @@ -1105,6 +1159,9 @@ func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bson } // Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDecimal { return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} @@ -1119,7 +1176,7 @@ func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bso return nil } -func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tJSONNumber { return emptyValue, ValueDecoderError{ Name: "JSONNumberDecodeValue", @@ -1164,6 +1221,9 @@ func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw } // JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tJSONNumber { return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} @@ -1178,7 +1238,7 @@ func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonr return nil } -func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tURL { return emptyValue, ValueDecoderError{ Name: "URLDecodeValue", @@ -1213,6 +1273,9 @@ func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueR } // URLDecodeValue is the ValueDecoderFunc for url.URL. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tURL { return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} @@ -1230,7 +1293,7 @@ func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.Value // TimeDecodeValue is the ValueDecoderFunc for time.Time. 
// // Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if vr.Type() != bsontype.DateTime { return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) } @@ -1251,7 +1314,7 @@ func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.Valu // ByteSliceDecodeValue is the ValueDecoderFunc for []byte. // // Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) } @@ -1316,7 +1379,7 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value keyType := val.Type().Key() for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -1336,6 +1399,9 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value } // ArrayDecodeValue is the ValueDecoderFunc for array types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Array { return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} @@ -1447,7 +1513,10 @@ func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.Val } // ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. -func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } @@ -1471,16 +1540,19 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr return err } - fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) + m, ok := val.Interface().(ValueUnmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } - return nil + return m.UnmarshalBSONValue(t, src) } // UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. 
-func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } @@ -1516,12 +1588,12 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. } - fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) + m, ok := val.Interface().(Unmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } - return nil + return m.UnmarshalBSON(src) } // EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. @@ -1565,7 +1637,10 @@ func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr b } // CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. -func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCoreDocument { return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} } @@ -1600,7 +1675,7 @@ func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueR idx := 0 for { vr, err := ar.ReadValue() - if err == bsonrw.ErrEOA { + if errors.Is(err, bsonrw.ErrEOA) { break } if err != nil { @@ -1671,6 +1746,9 @@ func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bso } // CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
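[editor's note: illustrative Go sketch, not part of the patch]
ValueUnmarshalerDecodeValue and UnmarshalerDecodeValue above drop the reflect MethodByName(...).Call(...) dance in favour of a plain interface assertion on val.Interface(). The preceding checks already guarantee the value (or its address) implements the interface, so the assertion is simpler and avoids the overhead of a reflective call. The two styles side by side, with hypothetical names:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    type Unmarshaler interface{ UnmarshalBSON([]byte) error }

    type doc struct{ n int }

    func (d *doc) UnmarshalBSON(b []byte) error { d.n = len(b); return nil }

    // before: reflective method lookup and call
    func viaReflect(val reflect.Value, src []byte) error {
    	out := val.MethodByName("UnmarshalBSON").Call([]reflect.Value{reflect.ValueOf(src)})
    	if err, ok := out[0].Interface().(error); ok && err != nil {
    		return err
    	}
    	return nil
    }

    // after: a direct type assertion, as the patched decoders now do
    func viaAssert(val reflect.Value, src []byte) error {
    	m, ok := val.Interface().(Unmarshaler)
    	if !ok {
    		return fmt.Errorf("%v does not implement Unmarshaler", val.Type())
    	}
    	return m.UnmarshalBSON(src)
    }

    func main() {
    	d := &doc{}
    	_ = viaReflect(reflect.ValueOf(d), make([]byte, 3))
    	_ = viaAssert(reflect.ValueOf(d), make([]byte, 5))
    	fmt.Println(d.n) // 5
    }
[end note]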
func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCodeWithScope { return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} @@ -1709,7 +1787,7 @@ func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr b elems := make([]reflect.Value, 0) for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 6bdb43cb..4751ae99 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -58,10 +58,16 @@ func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) er // DefaultValueEncoders is a namespace type for the default ValueEncoders used // when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. type DefaultValueEncoders struct{} // RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with // the provided RegistryBuilder. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) @@ -113,7 +119,10 @@ func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { } // BooleanEncodeValue is the ValueEncoderFunc for bool types. -func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Bool { return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} } @@ -125,6 +134,9 @@ func fitsIn32Bits(i int64) bool { } // IntEncodeValue is the ValueEncoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { switch val.Kind() { case reflect.Int8, reflect.Int16, reflect.Int32: @@ -176,7 +188,10 @@ func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.Valu } // FloatEncodeValue is the ValueEncoderFunc for float types. -func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { switch val.Kind() { case reflect.Float32, reflect.Float64: return vw.WriteDouble(val.Float()) @@ -188,7 +203,7 @@ func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.Val // StringEncodeValue is the ValueEncoderFunc for string types. // // Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. -func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if val.Kind() != reflect.String { return ValueEncoderError{ Name: "StringEncodeValue", @@ -201,7 +216,10 @@ func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw. } // ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. -func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tOID { return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} } @@ -209,7 +227,10 @@ func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw. } // Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. -func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDecimal { return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} } @@ -217,6 +238,9 @@ func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonr } // JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tJSONNumber { return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} @@ -237,7 +261,10 @@ func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonr } // URLEncodeValue is the ValueEncoderFunc for url.URL. -func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tURL { return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val} } @@ -248,7 +275,7 @@ func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.Value // TimeEncodeValue is the ValueEncoderFunc for time.TIme. // // Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead. -func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTime { return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} } @@ -260,7 +287,7 @@ func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.Valu // ByteSliceEncodeValue is the ValueEncoderFunc for []byte. // // Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tByteSlice { return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} } @@ -316,7 +343,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum } currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -325,7 +352,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -343,6 +370,9 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum } // ArrayEncodeValue is the ValueEncoderFunc for array types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Array { return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} @@ -388,7 +418,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -397,7 +427,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -457,7 +487,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -466,7 +496,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -515,7 +545,10 @@ func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw b } // ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. -func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement ValueMarshaler switch { case !val.IsValid(): @@ -531,17 +564,22 @@ func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw b return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} } - fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue") - returns := fn.Call(nil) - if !returns[2].IsNil() { - return returns[2].Interface().(error) + m, ok := val.Interface().(ValueMarshaler) + if !ok { + return vw.WriteNull() + } + t, data, err := m.MarshalBSONValue() + if err != nil { + return err } - t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte) return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) } // MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. -func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement Marshaler switch { case !val.IsValid(): @@ -557,16 +595,21 @@ func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} } - fn := val.Convert(tMarshaler).MethodByName("MarshalBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Marshaler) + if !ok { + return vw.WriteNull() + } + data, err := m.MarshalBSON() + if err != nil { + return err } - data := returns[0].Interface().([]byte) return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) } // ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement Proxy switch { @@ -583,27 +626,38 @@ func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.Val return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} } - fn := val.Convert(tProxy).MethodByName("ProxyBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Proxy) + if !ok { + return vw.WriteNull() } - data := returns[0] - var encoder ValueEncoder - var err error - if data.Elem().IsValid() { - encoder, err = ec.LookupEncoder(data.Elem().Type()) - } else { - encoder, err = ec.LookupEncoder(nil) + v, err := m.ProxyBSON() + if err != nil { + return err } + if v == nil { + encoder, err := ec.LookupEncoder(nil) + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) + } + vv := reflect.ValueOf(v) + switch vv.Kind() { + case reflect.Ptr, reflect.Interface: + vv = vv.Elem() + } + encoder, err := ec.LookupEncoder(vv.Type()) if err != nil { return err } - return encoder.EncodeValue(ec, vw, data.Elem()) + return encoder.EncodeValue(ec, vw, vv) } // JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. -func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tJavaScript { return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} } @@ -612,7 +666,10 @@ func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw. } // SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. -func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
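The same assertion-based rewrite applies to Marshaler above, whose output is copied as an embedded document (bsontype.EmbeddedDocument). A sketch with a hypothetical Point type:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/bson"
    )

    // Point is a hypothetical type that controls its own document form.
    type Point struct{ X, Y int }

    // MarshalBSON satisfies the Marshaler interface; the rewritten encoder
    // reaches it through a type assertion instead of MethodByName.
    func (p Point) MarshalBSON() ([]byte, error) {
        return bson.Marshal(bson.D{{Key: "x", Value: p.X}, {Key: "y", Value: p.Y}})
    }

    func main() {
        raw, err := bson.Marshal(bson.M{"origin": Point{1, 2}})
        if err != nil {
            panic(err)
        }
        fmt.Println(bson.Raw(raw)) // {"origin": {"x": 1, "y": 2}}
    }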
+func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tSymbol { return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} } @@ -621,7 +678,10 @@ func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.Valu } // BinaryEncodeValue is the ValueEncoderFunc for Binary. -func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tBinary { return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} } @@ -631,7 +691,10 @@ func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // UndefinedEncodeValue is the ValueEncoderFunc for Undefined. -func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tUndefined { return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} } @@ -640,7 +703,10 @@ func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.Val } // DateTimeEncodeValue is the ValueEncoderFunc for DateTime. -func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDateTime { return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} } @@ -649,7 +715,10 @@ func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.Valu } // NullEncodeValue is the ValueEncoderFunc for Null. -func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tNull { return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} } @@ -658,7 +727,10 @@ func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWri } // RegexEncodeValue is the ValueEncoderFunc for Regex. -func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRegex { return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} } @@ -669,7 +741,10 @@ func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWr } // DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. -func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDBPointer { return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} } @@ -680,7 +755,10 @@ func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.Val } // TimestampEncodeValue is the ValueEncoderFunc for Timestamp. -func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTimestamp { return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} } @@ -691,7 +769,10 @@ func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.Val } // MinKeyEncodeValue is the ValueEncoderFunc for MinKey. -func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tMinKey { return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} } @@ -700,7 +781,10 @@ func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. -func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tMaxKey { return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} } @@ -709,7 +793,10 @@ func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. -func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCoreDocument { return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} } @@ -720,6 +807,9 @@ func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw. } // CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCodeWithScope { return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go index 5f903ebe..4613e5a1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -31,35 +31,39 @@ // allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext // instance is provided and serves similar functionality to the EncodeContext. // -// # Registry and RegistryBuilder -// -// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type -// documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a -// RegistryBuilder, which handles three main types of codecs: -// -// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and RegisterTypeDecoder methods. -// The registered codec will be invoked when encoding/decoding a value whose type matches the registered type exactly. -// If the registered type is an interface, the codec will be invoked when encoding or decoding values whose type is the -// interface, but not for values with concrete types that implement the interface. -// -// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and RegisterHookDecoder methods. -// These methods only accept interface types and the registered codecs will be invoked when encoding or decoding values -// whose types implement the interface. An example of a hook defined by the driver is bson.Marshaler. The driver will -// call the MarshalBSON method for any value whose type implements bson.Marshaler, regardless of the value's concrete -// type. -// -// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type associations are used when -// decoding into a bson.D/bson.M or a struct field of type interface{}. For example, by default, BSON int32 and int64 -// values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would -// change the behavior so these values decode as Go int instances instead: +// # Registry +// +// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. See the Registry type +// documentation for examples of registering various custom encoders and decoders. A Registry can +// have three main types of codecs: +// +// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and +// RegisterTypeDecoder methods. 
The registered codec will be invoked when encoding/decoding a value +// whose type matches the registered type exactly. +// If the registered type is an interface, the codec will be invoked when encoding or decoding +// values whose type is the interface, but not for values with concrete types that implement the +// interface. +// +// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and +// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs +// will be invoked when encoding or decoding values whose types implement the interface. An example +// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method +// for any value whose type implements bson.Marshaler, regardless of the value's concrete type. +// +// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type +// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}. +// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances, +// respectively, when decoding into a bson.D. The following code would change the behavior so these +// values decode as Go int instances instead: // // intType := reflect.TypeOf(int(0)) -// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) // -// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder -// methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the -// registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. -// These methods should be used to change the behavior for all values for a specific kind. +// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and +// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding +// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't +// match a registered type or hook encoder/decoder first. These methods should be used to change the +// behavior for all values for a specific kind. // // # Registry Lookup Procedure // @@ -67,17 +71,18 @@ // // 1. A type encoder registered for the exact type of the value. // -// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to the value. If the -// value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and bsoncodec.ValueMarshaler), the first -// one registered will be selected. Note that registries constructed using bson.NewRegistryBuilder have driver-defined -// hooks registered for the bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those -// will take precedence over any new hooks. +// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to +// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and +// bsoncodec.ValueMarshaler), the first one registered will be selected. 
Note that registries +// constructed using bson.NewRegistry have driver-defined hooks registered for the +// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take +// precedence over any new hooks. // // 3. A kind encoder registered for the value's kind. // -// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The same precedence -// rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is -// found. +// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The +// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder +// will be returned if no decoder is found. // // # DefaultValueEncoders and DefaultValueDecoders // diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index eda417cf..098368f0 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -16,18 +16,44 @@ import ( ) // EmptyInterfaceCodec is the Codec used for interface{} values. +// +// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go +// Driver 2.0. To configure the empty interface encode and decode behavior, use +// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to unmarshal BSON binary field +// values as a Go byte slice, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// BinaryAsSlice: true, +// }) +// +// See the deprecation notice for each field in EmptyInterfaceCodec for the +// corresponding settings. type EmptyInterfaceCodec struct { + // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the + // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. + // + // Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead. DecodeBinaryAsSlice bool } var ( defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec() - _ ValueCodec = defaultEmptyInterfaceCodec + // Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it + // to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultEmptyInterfaceCodec ) // NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. +// +// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See +// [EmptyInterfaceCodec] for more details. func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) 
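The deprecation notices point to options.BSONOptions as the replacement for per-codec flags. A sketch of the client-level equivalent of EmptyInterfaceCodec.DecodeBinaryAsSlice, assuming a placeholder connection string:

    package main

    import (
        "context"

        "go.mongodb.org/mongo-driver/mongo"
        "go.mongodb.org/mongo-driver/mongo/options"
    )

    func main() {
        // BinaryAsSlice: generic/old-subtype BSON binary values decode into
        // interface{} destinations as []byte instead of primitive.Binary.
        opt := options.Client().
            ApplyURI("mongodb://localhost:27017"). // placeholder URI
            SetBSONOptions(&options.BSONOptions{BinaryAsSlice: true})

        client, err := mongo.Connect(context.Background(), opt)
        if err != nil {
            panic(err)
        }
        defer func() { _ = client.Disconnect(context.Background()) }()
    }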
@@ -121,7 +147,7 @@ func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReade return emptyValue, err } - if eic.DecodeBinaryAsSlice && rtype == tBinary { + if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary { binElem := elem.Interface().(primitive.Binary) if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { elem = reflect.ValueOf(binElem.Data) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index e1fbef9c..d7e00ffa 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -8,6 +8,7 @@ package bsoncodec import ( "encoding" + "errors" "fmt" "reflect" "strconv" @@ -20,14 +21,44 @@ import ( var defaultMapCodec = NewMapCodec() // MapCodec is the Codec used for map values. +// +// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To +// configure the map encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON +// documents, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilMapAsEmpty: true, +// }) +// +// See the deprecation notice for each field in MapCodec for the corresponding +// settings. type MapCodec struct { - DecodeZerosMap bool - EncodeNilAsEmpty bool + // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination + // value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead. + DecodeZerosMap bool + + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of + // BSON null. + // + // Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead. + EncodeNilAsEmpty bool + + // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name + // strings using fmt.Sprintf() instead of the default string conversion logic. + // + // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or + // options.BSONOptions.StringifyMapKeysWithFmt instead. EncodeKeysWithStringer bool } -var _ ValueCodec = &MapCodec{} - // KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. // This applies to types used as map keys and is similar to encoding.TextMarshaler. type KeyMarshaler interface { @@ -45,6 +76,9 @@ type KeyUnmarshaler interface { } // NewMapCodec returns a MapCodec with options opts. +// +// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See +// [MapCodec] for more details. func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { mapOpt := bsonoptions.MergeMapCodecOptions(opts...) 
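MapCodec converts non-string map keys through the KeyMarshaler and KeyUnmarshaler interfaces declared in the map_codec.go hunk above. A sketch with a hypothetical Coord key type; the method signatures follow the driver's interfaces:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/bson"
    )

    // Coord is a hypothetical map-key type.
    type Coord struct{ X, Y int }

    // MarshalKey satisfies bsoncodec.KeyMarshaler, so Coord values can become
    // BSON document field names.
    func (c Coord) MarshalKey() (string, error) {
        return fmt.Sprintf("%d,%d", c.X, c.Y), nil
    }

    // UnmarshalKey satisfies bsoncodec.KeyUnmarshaler for the decode direction.
    func (c *Coord) UnmarshalKey(key string) error {
        _, err := fmt.Sscanf(key, "%d,%d", &c.X, &c.Y)
        return err
    }

    func main() {
        raw, err := bson.Marshal(map[Coord]string{{X: 1, Y: 2}: "treasure"})
        if err != nil {
            panic(err)
        }
        fmt.Println(bson.Raw(raw)) // {"1,2": "treasure"}
    }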
@@ -67,7 +101,7 @@ func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val ref return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} } - if val.IsNil() && !mc.EncodeNilAsEmpty { + if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty { // If we have a nil map but we can't WriteNull, that means we're probably trying to encode // to a TopLevel document. We can't currently tell if this is what actually happened, but if // there's a deeper underlying problem, the error will also be returned from WriteDocument, @@ -100,7 +134,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v keys := val.MapKeys() for _, key := range keys { - keyStr, err := mc.encodeKey(key) + keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt) if err != nil { return err } @@ -110,7 +144,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v } currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -119,7 +153,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -163,7 +197,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref val.Set(reflect.MakeMap(val.Type())) } - if val.Len() > 0 && mc.DecodeZerosMap { + if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) { clearMap(val) } @@ -182,7 +216,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -211,8 +245,8 @@ func clearMap(m reflect.Value) { } } -func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { - if mc.EncodeKeysWithStringer { +func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) { + if mc.EncodeKeysWithStringer || encodeKeysWithStringer { return fmt.Sprint(val), nil } @@ -295,7 +329,7 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, if mc.EncodeKeysWithStringer { parsed, err := strconv.ParseFloat(key, 64) if err != nil { - return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err) + return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err) } keyVal = reflect.ValueOf(parsed) break diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go index 616a3e70..ddfa4a33 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -8,7 +8,6 @@ package bsoncodec import ( "reflect" - "sync" "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" @@ -18,18 +17,28 @@ var _ ValueEncoder = &PointerCodec{} var _ ValueDecoder = &PointerCodec{} // PointerCodec is the Codec used for pointers. +// +// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. 
To +// override the default pointer encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for pointers. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder) +// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder) type PointerCodec struct { - ecache map[reflect.Type]ValueEncoder - dcache map[reflect.Type]ValueDecoder - l sync.RWMutex + ecache typeEncoderCache + dcache typeDecoderCache } // NewPointerCodec returns a PointerCodec that has been initialized. +// +// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See +// [PointerCodec] for more details. func NewPointerCodec() *PointerCodec { - return &PointerCodec{ - ecache: make(map[reflect.Type]ValueEncoder), - dcache: make(map[reflect.Type]ValueDecoder), - } + return &PointerCodec{} } // EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil @@ -46,24 +55,19 @@ func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val return vw.WriteNull() } - pc.l.RLock() - enc, ok := pc.ecache[val.Type()] - pc.l.RUnlock() - if ok { - if enc == nil { - return ErrNoEncoder{Type: val.Type()} + typ := val.Type() + if v, ok := pc.ecache.Load(typ); ok { + if v == nil { + return ErrNoEncoder{Type: typ} } - return enc.EncodeValue(ec, vw, val.Elem()) + return v.EncodeValue(ec, vw, val.Elem()) } - - enc, err := ec.LookupEncoder(val.Type().Elem()) - pc.l.Lock() - pc.ecache[val.Type()] = enc - pc.l.Unlock() + // TODO(charlie): handle concurrent requests for the same type + enc, err := ec.LookupEncoder(typ.Elem()) + enc = pc.ecache.LoadOrStore(typ, enc) if err != nil { return err } - return enc.EncodeValue(ec, vw, val.Elem()) } @@ -74,36 +78,31 @@ func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} } + typ := val.Type() if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) + val.Set(reflect.Zero(typ)) return vr.ReadNull() } if vr.Type() == bsontype.Undefined { - val.Set(reflect.Zero(val.Type())) + val.Set(reflect.Zero(typ)) return vr.ReadUndefined() } if val.IsNil() { - val.Set(reflect.New(val.Type().Elem())) + val.Set(reflect.New(typ.Elem())) } - pc.l.RLock() - dec, ok := pc.dcache[val.Type()] - pc.l.RUnlock() - if ok { - if dec == nil { - return ErrNoDecoder{Type: val.Type()} + if v, ok := pc.dcache.Load(typ); ok { + if v == nil { + return ErrNoDecoder{Type: typ} } - return dec.DecodeValue(dc, vr, val.Elem()) + return v.DecodeValue(dc, vr, val.Elem()) } - - dec, err := dc.LookupDecoder(val.Type().Elem()) - pc.l.Lock() - pc.dcache[val.Type()] = dec - pc.l.Unlock() + // TODO(charlie): handle concurrent requests for the same type + dec, err := dc.LookupDecoder(typ.Elem()) + dec = pc.dcache.LoadOrStore(typ, dec) if err != nil { return err } - return dec.DecodeValue(dc, vr, val.Elem()) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 80644023..196c491b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -16,12 +16,18 @@ import ( ) // ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder. +// +// Deprecated: ErrNilType will not be supported in Go Driver 2.0. 
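The PointerCodec deprecation notice names a hypothetical myPointerEncoder; one plausible shape for it, registered for the Ptr kind on a default registry, is sketched below (nil pointers become BSON null, everything else is delegated to the encoder for the pointed-to type):

    package main

    import (
        "reflect"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsoncodec"
        "go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func main() {
        myPointerEncoder := bsoncodec.ValueEncoderFunc(func(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
            if val.Kind() != reflect.Ptr {
                return bsoncodec.ValueEncoderError{Name: "myPointerEncoder", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
            }
            // nil pointers encode as BSON null.
            if val.IsNil() {
                return vw.WriteNull()
            }
            // Everything else defers to the element type's encoder.
            enc, err := ec.LookupEncoder(val.Type().Elem())
            if err != nil {
                return err
            }
            return enc.EncodeValue(ec, vw, val.Elem())
        })

        reg := bson.NewRegistry()
        reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder)
        _ = reg // pass to the encoder/decoder configuration as needed
    }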
var ErrNilType = errors.New("cannot perform a decoder lookup on ") // ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder. +// +// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0. var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder") // ErrNoEncoder is returned when there wasn't an encoder available for a type. +// +// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0. type ErrNoEncoder struct { Type reflect.Type } @@ -34,6 +40,8 @@ func (ene ErrNoEncoder) Error() string { } // ErrNoDecoder is returned when there wasn't a decoder available for a type. +// +// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0. type ErrNoDecoder struct { Type reflect.Type } @@ -43,6 +51,8 @@ func (end ErrNoDecoder) Error() string { } // ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type. +// +// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0. type ErrNoTypeMapEntry struct { Type bsontype.Type } @@ -52,63 +62,30 @@ func (entme ErrNoTypeMapEntry) Error() string { } // ErrNotInterface is returned when the provided type is not an interface. +// +// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0. var ErrNotInterface = errors.New("The provided type is not an interface") // A RegistryBuilder is used to build a Registry. This type is not goroutine // safe. +// +// Deprecated: Use Registry instead. type RegistryBuilder struct { - typeEncoders map[reflect.Type]ValueEncoder - interfaceEncoders []interfaceValueEncoder - kindEncoders map[reflect.Kind]ValueEncoder - - typeDecoders map[reflect.Type]ValueDecoder - interfaceDecoders []interfaceValueDecoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type -} - -// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main -// typed passed around and Encoders and Decoders are constructed from it. -type Registry struct { - typeEncoders map[reflect.Type]ValueEncoder - typeDecoders map[reflect.Type]ValueDecoder - - interfaceEncoders []interfaceValueEncoder - interfaceDecoders []interfaceValueDecoder - - kindEncoders map[reflect.Kind]ValueEncoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type - - mu sync.RWMutex + registry *Registry } // NewRegistryBuilder creates a new empty RegistryBuilder. +// +// Deprecated: Use NewRegistry instead. func NewRegistryBuilder() *RegistryBuilder { return &RegistryBuilder{ - typeEncoders: make(map[reflect.Type]ValueEncoder), - typeDecoders: make(map[reflect.Type]ValueDecoder), - - interfaceEncoders: make([]interfaceValueEncoder, 0), - interfaceDecoders: make([]interfaceValueDecoder, 0), - - kindEncoders: make(map[reflect.Kind]ValueEncoder), - kindDecoders: make(map[reflect.Kind]ValueDecoder), - - typeMap: make(map[bsontype.Type]reflect.Type), + registry: NewRegistry(), } } -func buildDefaultRegistry() *Registry { - rb := NewRegistryBuilder() - defaultValueEncoders.RegisterDefaultEncoders(rb) - defaultValueDecoders.RegisterDefaultDecoders(rb) - return rb.Build() -} - // RegisterCodec will register the provided ValueCodec for the provided type. +// +// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead. 
func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { rb.RegisterTypeEncoder(t, codec) rb.RegisterTypeDecoder(t, codec) @@ -120,31 +97,22 @@ func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *Regi // The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered // for a pointer to that type. // -// If the given type is an interface, the encoder will be called when marshalling a type that is that interface. It -// will not be called when marshalling a non-interface type that implements the interface. +// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It +// will not be called when marshaling a non-interface type that implements the interface. +// +// Deprecated: Use Registry.RegisterTypeEncoder instead. func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) return rb } // RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when -// marshalling a type if the type implements t or a pointer to the type implements t. If the provided type is not +// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not // an interface (i.e. t.Kind() != reflect.Interface), this method will panic. +// +// Deprecated: Use Registry.RegisterInterfaceEncoder instead. func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookEncoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, encoder := range rb.interfaceEncoders { - if encoder.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + rb.registry.RegisterInterfaceEncoder(t, enc) return rb } @@ -153,97 +121,78 @@ func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) // The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered // for a pointer to that type. // -// If the given type is an interface, the decoder will be called when unmarshalling into a type that is that interface. -// It will not be called when unmarshalling into a non-interface type that implements the interface. +// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface. +// It will not be called when unmarshaling into a non-interface type that implements the interface. +// +// Deprecated: Use Registry.RegisterTypeDecoder instead. func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } // RegisterHookDecoder will register an decoder for the provided interface type t. This decoder will be called when -// unmarshalling into a type if the type implements t or a pointer to the type implements t. If the provided type is not +// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not // an interface (i.e. t.Kind() != reflect.Interface), this method will panic. 
+// +// Deprecated: Use Registry.RegisterInterfaceDecoder instead. func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookDecoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, decoder := range rb.interfaceDecoders { - if decoder.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + rb.registry.RegisterInterfaceDecoder(t, dec) return rb } // RegisterEncoder registers the provided type and encoder pair. // -// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead. +// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead. func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { if t == tEmpty { - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) return rb } switch t.Kind() { case reflect.Interface: - for idx, ir := range rb.interfaceEncoders { - if ir.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + rb.registry.RegisterInterfaceEncoder(t, enc) default: - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) } return rb } // RegisterDecoder registers the provided type and decoder pair. // -// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead. +// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead. func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { if t == nil { - rb.typeDecoders[nil] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } if t == tEmpty { - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } switch t.Kind() { case reflect.Interface: - for idx, ir := range rb.interfaceDecoders { - if ir.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + rb.registry.RegisterInterfaceDecoder(t, dec) default: - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) } return rb } -// RegisterDefaultEncoder will registr the provided ValueEncoder to the provided +// RegisterDefaultEncoder will register the provided ValueEncoder to the provided // kind. +// +// Deprecated: Use Registry.RegisterKindEncoder instead. func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { - rb.kindEncoders[kind] = enc + rb.registry.RegisterKindEncoder(kind, enc) return rb } // RegisterDefaultDecoder will register the provided ValueDecoder to the // provided kind. +// +// Deprecated: Use Registry.RegisterKindDecoder instead. func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { - rb.kindDecoders[kind] = dec + rb.registry.RegisterKindDecoder(kind, dec) return rb } @@ -256,120 +205,233 @@ func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDe // to decode to bson.Raw, use the following code: // // rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +// +// Deprecated: Use Registry.RegisterTypeMapEntry instead. 
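Every builder method above now forwards to an embedded Registry, so migration is mostly a matter of dropping the chaining. A before/after sketch using the type map example from these comments:

    package main

    import (
        "reflect"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsontype"
    )

    func main() {
        // Old builder style (still compiles; each call now delegates):
        //
        //     reg := bsoncodec.NewRegistryBuilder().
        //         RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})).
        //         Build()
        //
        // New style: Registry methods return nothing and do not chain.
        reg := bson.NewRegistry()
        reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
        _ = reg
    }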
func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { - rb.typeMap[bt] = rt + rb.registry.RegisterTypeMapEntry(bt, rt) return rb } // Build creates a Registry from the current state of this RegistryBuilder. +// +// Deprecated: Use NewRegistry instead. func (rb *RegistryBuilder) Build() *Registry { - registry := new(Registry) - - registry.typeEncoders = make(map[reflect.Type]ValueEncoder) - for t, enc := range rb.typeEncoders { - registry.typeEncoders[t] = enc + r := &Registry{ + interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...), + interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...), + typeEncoders: rb.registry.typeEncoders.Clone(), + typeDecoders: rb.registry.typeDecoders.Clone(), + kindEncoders: rb.registry.kindEncoders.Clone(), + kindDecoders: rb.registry.kindDecoders.Clone(), } + rb.registry.typeMap.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + r.typeMap.Store(k, v) + } + return true + }) + return r +} - registry.typeDecoders = make(map[reflect.Type]ValueDecoder) - for t, dec := range rb.typeDecoders { - registry.typeDecoders[t] = dec +// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main +// typed passed around and Encoders and Decoders are constructed from it. +type Registry struct { + interfaceEncoders []interfaceValueEncoder + interfaceDecoders []interfaceValueDecoder + typeEncoders *typeEncoderCache + typeDecoders *typeDecoderCache + kindEncoders *kindEncoderCache + kindDecoders *kindDecoderCache + typeMap sync.Map // map[bsontype.Type]reflect.Type +} + +// NewRegistry creates a new empty Registry. +func NewRegistry() *Registry { + return &Registry{ + typeEncoders: new(typeEncoderCache), + typeDecoders: new(typeDecoderCache), + kindEncoders: new(kindEncoderCache), + kindDecoders: new(kindDecoderCache), } +} + +// RegisterTypeEncoder registers the provided ValueEncoder for the provided type. +// +// The type will be used as provided, so an encoder can be registered for a type and a different +// encoder can be registered for a pointer to that type. +// +// If the given type is an interface, the encoder will be called when marshaling a type that is +// that interface. It will not be called when marshaling a non-interface type that implements the +// interface. To get the latter behavior, call RegisterHookEncoder instead. +// +// RegisterTypeEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) { + r.typeEncoders.Store(valueType, enc) +} + +// RegisterTypeDecoder registers the provided ValueDecoder for the provided type. +// +// The type will be used as provided, so a decoder can be registered for a type and a different +// decoder can be registered for a pointer to that type. +// +// If the given type is an interface, the decoder will be called when unmarshaling into a type that +// is that interface. It will not be called when unmarshaling into a non-interface type that +// implements the interface. To get the latter behavior, call RegisterHookDecoder instead. +// +// RegisterTypeDecoder should not be called concurrently with any other Registry method. 
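A sketch of RegisterTypeEncoder with a hypothetical encoder for time.Duration. Because the type is matched exactly, this registration covers time.Duration but not *time.Duration, which would need its own registration (or falls through to the Ptr kind encoder):

    package main

    import (
        "reflect"
        "time"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsoncodec"
        "go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func main() {
        // durEnc writes a time.Duration as its string form, e.g. "1h30m0s".
        durEnc := bsoncodec.ValueEncoderFunc(func(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
            return vw.WriteString(time.Duration(val.Int()).String())
        })

        reg := bson.NewRegistry()
        reg.RegisterTypeEncoder(reflect.TypeOf(time.Duration(0)), durEnc)
        _ = reg
    }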
+func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) { + r.typeDecoders.Store(valueType, dec) +} - registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders)) - copy(registry.interfaceEncoders, rb.interfaceEncoders) +// RegisterKindEncoder registers the provided ValueEncoder for the provided kind. +// +// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For +// example, consider the type MyInt defined as +// +// type MyInt int32 +// +// To define an encoder for MyInt and int32, use RegisterKindEncoder like +// +// reg.RegisterKindEncoder(reflect.Int32, myEncoder) +// +// RegisterKindEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { + r.kindEncoders.Store(kind, enc) +} - registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders)) - copy(registry.interfaceDecoders, rb.interfaceDecoders) +// RegisterKindDecoder registers the provided ValueDecoder for the provided kind. +// +// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For +// example, consider the type MyInt defined as +// +// type MyInt int32 +// +// To define an decoder for MyInt and int32, use RegisterKindDecoder like +// +// reg.RegisterKindDecoder(reflect.Int32, myDecoder) +// +// RegisterKindDecoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) { + r.kindDecoders.Store(kind, dec) +} - registry.kindEncoders = make(map[reflect.Kind]ValueEncoder) - for kind, enc := range rb.kindEncoders { - registry.kindEncoders[kind] = enc +// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will +// be called when marshaling a type if the type implements iface or a pointer to the type +// implements iface. If the provided type is not an interface +// (i.e. iface.Kind() != reflect.Interface), this method will panic. +// +// RegisterInterfaceEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) { + if iface.Kind() != reflect.Interface { + panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", iface, iface.Kind()) + panic(panicStr) } - registry.kindDecoders = make(map[reflect.Kind]ValueDecoder) - for kind, dec := range rb.kindDecoders { - registry.kindDecoders[kind] = dec + for idx, encoder := range r.interfaceEncoders { + if encoder.i == iface { + r.interfaceEncoders[idx].ve = enc + return + } + } + + r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc}) +} + +// RegisterInterfaceDecoder registers an decoder for the provided interface type iface. This decoder will +// be called when unmarshaling into a type if the type implements iface or a pointer to the type +// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface), +// this method will panic. +// +// RegisterInterfaceDecoder should not be called concurrently with any other Registry method. 
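Expanding the MyInt example from the comments above into a runnable form; myEncoder is a stand-in, and bson.MarshalWithRegistry is the 1.x convenience helper for marshaling with a custom registry:

    package main

    import (
        "fmt"
        "reflect"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsoncodec"
        "go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    type MyInt int32

    func main() {
        // A kind encoder applies to every type whose underlying kind matches,
        // so this one handles both int32 and MyInt. Writing int32 values as
        // BSON int64 just makes the override visible in the output.
        myEncoder := bsoncodec.ValueEncoderFunc(func(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
            return vw.WriteInt64(val.Int())
        })

        reg := bson.NewRegistry()
        reg.RegisterKindEncoder(reflect.Int32, myEncoder)

        raw, err := bson.MarshalWithRegistry(reg, bson.D{{Key: "n", Value: MyInt(7)}})
        if err != nil {
            panic(err)
        }
        fmt.Println(bson.Raw(raw)) // roughly {"n": {"$numberLong": "7"}}
    }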
+func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) { + if iface.Kind() != reflect.Interface { + panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", iface, iface.Kind()) + panic(panicStr) } - registry.typeMap = make(map[bsontype.Type]reflect.Type) - for bt, rt := range rb.typeMap { - registry.typeMap[bt] = rt + for idx, decoder := range r.interfaceDecoders { + if decoder.i == iface { + r.interfaceDecoders[idx].vd = dec + return + } } - return registry + r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec}) } -// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows: +// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this +// mapping is decoding situations where an empty interface is used and a default type needs to be +// created and decoded into. // -// 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using -// RegisterTypeEncoder for the interface will be selected. +// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON +// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents +// to decode to bson.Raw, use the following code: // -// 2. An encoder registered using RegisterHookEncoder for an interface implemented by the type or by a pointer to the -// type. +// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { + r.typeMap.Store(bt, rt) +} + +// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup +// order: +// +// 1. An encoder registered for the exact type. If the given type is an interface, an encoder +// registered using RegisterTypeEncoder for that interface will be selected. // -// 3. An encoder registered for the reflect.Kind of the value. +// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type +// or by a pointer to the type. // -// If no encoder is found, an error of type ErrNoEncoder is returned. -func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) { - encodererr := ErrNoEncoder{Type: t} - r.mu.RLock() - enc, found := r.lookupTypeEncoder(t) - r.mu.RUnlock() +// 3. An encoder registered using RegisterKindEncoder for the kind of value. +// +// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for +// concurrent use by multiple goroutines after all codecs and encoders are registered. 
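The decode-side effect of the type map entry shown above: with the entry registered, embedded documents land in interface{} destinations as bson.Raw rather than the default bson.D. bson.UnmarshalWithRegistry is the 1.x helper:

    package main

    import (
        "fmt"
        "reflect"

        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsontype"
    )

    func main() {
        reg := bson.NewRegistry()
        reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))

        doc, err := bson.Marshal(bson.D{{Key: "inner", Value: bson.D{{Key: "a", Value: 1}}}})
        if err != nil {
            panic(err)
        }

        var out map[string]interface{}
        if err := bson.UnmarshalWithRegistry(reg, doc, &out); err != nil {
            panic(err)
        }
        fmt.Printf("%T\n", out["inner"]) // bson.Raw
    }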
+func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { + if valueType == nil { + return nil, ErrNoEncoder{Type: valueType} + } + enc, found := r.lookupTypeEncoder(valueType) if found { if enc == nil { - return nil, ErrNoEncoder{Type: t} + return nil, ErrNoEncoder{Type: valueType} } return enc, nil } - enc, found = r.lookupInterfaceEncoder(t, true) + enc, found = r.lookupInterfaceEncoder(valueType, true) if found { - r.mu.Lock() - r.typeEncoders[t] = enc - r.mu.Unlock() - return enc, nil - } - - if t == nil { - r.mu.Lock() - r.typeEncoders[t] = nil - r.mu.Unlock() - return nil, encodererr + return r.typeEncoders.LoadOrStore(valueType, enc), nil } - enc, found = r.kindEncoders[t.Kind()] - if !found { - r.mu.Lock() - r.typeEncoders[t] = nil - r.mu.Unlock() - return nil, encodererr + if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { + return r.storeTypeEncoder(valueType, v), nil } + return nil, ErrNoEncoder{Type: valueType} +} - r.mu.Lock() - r.typeEncoders[t] = enc - r.mu.Unlock() - return enc, nil +func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { + return r.typeEncoders.LoadOrStore(rt, enc) } -func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) { - enc, found := r.typeEncoders[t] - return enc, found +func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { + return r.typeEncoders.Load(rt) } -func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) { - if t == nil { +func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { + if valueType == nil { return nil, false } for _, ienc := range r.interfaceEncoders { - if t.Implements(ienc.i) { + if valueType.Implements(ienc.i) { return ienc.ve, true } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceEncoders - defaultEnc, found := r.lookupInterfaceEncoder(t, false) + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceEncoders + defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) if !found { - defaultEnc = r.kindEncoders[t.Kind()] + defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) } return newCondAddrEncoder(ienc.ve, defaultEnc), true } @@ -377,70 +439,61 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value return nil, false } -// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows: +// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup +// order: // -// 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using -// RegisterTypeDecoder for the interface will be selected. +// 1. A decoder registered for the exact type. If the given type is an interface, a decoder +// registered using RegisterTypeDecoder for that interface will be selected. // -// 2. A decoder registered using RegisterHookDecoder for an interface implemented by the type or by a pointer to the -// type. +// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by +// a pointer to the type. // -// 3. 
A decoder registered for the reflect.Kind of the value. +// 3. A decoder registered using RegisterKindDecoder for the kind of value. // -// If no decoder is found, an error of type ErrNoDecoder is returned. -func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) { - if t == nil { +// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for +// concurrent use by multiple goroutines after all codecs and decoders are registered. +func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { + if valueType == nil { return nil, ErrNilType } - decodererr := ErrNoDecoder{Type: t} - r.mu.RLock() - dec, found := r.lookupTypeDecoder(t) - r.mu.RUnlock() + dec, found := r.lookupTypeDecoder(valueType) if found { if dec == nil { - return nil, ErrNoDecoder{Type: t} + return nil, ErrNoDecoder{Type: valueType} } return dec, nil } - dec, found = r.lookupInterfaceDecoder(t, true) + dec, found = r.lookupInterfaceDecoder(valueType, true) if found { - r.mu.Lock() - r.typeDecoders[t] = dec - r.mu.Unlock() - return dec, nil + return r.storeTypeDecoder(valueType, dec), nil } - dec, found = r.kindDecoders[t.Kind()] - if !found { - r.mu.Lock() - r.typeDecoders[t] = nil - r.mu.Unlock() - return nil, decodererr + if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { + return r.storeTypeDecoder(valueType, v), nil } + return nil, ErrNoDecoder{Type: valueType} +} - r.mu.Lock() - r.typeDecoders[t] = dec - r.mu.Unlock() - return dec, nil +func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { + return r.typeDecoders.Load(valueType) } -func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) { - dec, found := r.typeDecoders[t] - return dec, found +func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { + return r.typeDecoders.LoadOrStore(typ, dec) } -func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) { +func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { for _, idec := range r.interfaceDecoders { - if t.Implements(idec.i) { + if valueType.Implements(idec.i) { return idec.vd, true } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceDecoders - defaultDec, found := r.lookupInterfaceDecoder(t, false) + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceDecoders + defaultDec, found := r.lookupInterfaceDecoder(valueType, false) if !found { - defaultDec = r.kindDecoders[t.Kind()] + defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) } return newCondAddrDecoder(idec.vd, defaultDec), true } @@ -450,12 +503,14 @@ func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (Value // LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON // type. If no type is found, ErrNoTypeMapEntry is returned. +// +// LookupTypeMapEntry should not be called concurrently with any other Registry method. 
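A small usage sketch of the lookup methods. The first successful lookup is cached via LoadOrStore, which is why the docs insist that all registration finish before concurrent lookups begin:

    package main

    import (
        "fmt"
        "reflect"

        "go.mongodb.org/mongo-driver/bson"
    )

    func main() {
        reg := bson.NewRegistry()

        // Lookup follows type -> interface -> kind order; for int32 the
        // winner is the default kind decoder, cached for the next call.
        dec, err := reg.LookupDecoder(reflect.TypeOf(int32(0)))
        if err != nil {
            panic(err)
        }
        fmt.Printf("%T\n", dec)
    }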
func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - t, ok := r.typeMap[bt] - if !ok || t == nil { + v, ok := r.typeMap.Load(bt) + if v == nil || !ok { return nil, ErrNoTypeMapEntry{Type: bt} } - return t, nil + return v.(reflect.Type), nil } type interfaceValueEncoder struct { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go index 3c1b6b86..14c9fd25 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -7,6 +7,7 @@ package bsoncodec import ( + "errors" "fmt" "reflect" @@ -19,13 +20,35 @@ import ( var defaultSliceCodec = NewSliceCodec() // SliceCodec is the Codec used for slice values. +// +// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To +// configure the slice encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go slices as empty +// BSON arrays, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in SliceCodec for the corresponding +// settings. type SliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of + // BSON null. + // + // Deprecated: Use bson.Encoder.NilSliceAsEmpty instead. EncodeNilAsEmpty bool } -var _ ValueCodec = &MapCodec{} - // NewSliceCodec returns a MapCodec with options opts. +// +// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See +// [SliceCodec] for more details. func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) @@ -42,21 +65,19 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} } - if val.IsNil() && !sc.EncodeNilAsEmpty { + if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty { return vw.WriteNull() } // If we have a []byte we want to treat it as a binary instead of as an array. if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } + byteSlice := make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(byteSlice), val) return vw.WriteBinary(byteSlice) } // If we have a []primitive.E we want to treat it as a document instead of as an array. 
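The []byte fast path in the slice codec hunk above swaps a per-element reflect.Append loop for one bulk reflect.Copy. A standalone sketch with a hypothetical named byte-slice type, which is exactly the case that forces the codec through reflect rather than a direct type assertion:

    package main

    import (
        "fmt"
        "reflect"
    )

    // MyBytes has kind slice-of-uint8 but is not the type []byte.
    type MyBytes []byte

    func main() {
        val := reflect.ValueOf(MyBytes("hello"))

        // Old approach: append one byte at a time through reflection.
        // New approach: allocate once and bulk-copy.
        byteSlice := make([]byte, val.Len())
        reflect.Copy(reflect.ValueOf(byteSlice), val)

        fmt.Printf("%s\n", byteSlice) // hello
    }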
- if val.Type().ConvertibleTo(tD) { + if val.Type() == tD || val.Type().ConvertibleTo(tD) { d := val.Convert(tD).Interface().(primitive.D) dw, err := vw.WriteDocument() @@ -87,7 +108,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -96,7 +117,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -145,11 +166,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r if val.IsNil() { val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) } - val.SetLen(0) - for _, elem := range data { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } + val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) return nil case bsontype.String: if sliceType := val.Type().Elem(); sliceType != tByte { @@ -164,11 +182,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r if val.IsNil() { val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) } - val.SetLen(0) - for _, elem := range byteStr { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } + val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) return nil default: return fmt.Errorf("cannot decode %v into a slice", vrType) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go index 5332b7c3..a8f885a8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -15,26 +15,46 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// StringCodec is the Codec used for struct values. +// StringCodec is the Codec used for string values. +// +// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To +// override the default string encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for strings. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.String, myStringEncoder) +// reg.RegisterKindDecoder(reflect.String, myStringDecoder) type StringCodec struct { + // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. + // If false, a string made from the raw object ID bytes will be used. Defaults to true. + // + // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. DecodeObjectIDAsHex bool } var ( defaultStringCodec = NewStringCodec() - _ ValueCodec = defaultStringCodec + // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultStringCodec ) // NewStringCodec returns a StringCodec with options opts. +// +// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See +// [StringCodec] for more details. 
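The StringCodec deprecation notice above points at bson.NewRegistry with RegisterKindEncoder/RegisterKindDecoder. A sketch expanding that example into a working replacement decoder; trimDecoder is hypothetical and only handles BSON strings:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

type trimDecoder struct{}

func (trimDecoder) DecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Kind() != reflect.String {
		return bsoncodec.ValueDecoderError{Name: "trimDecoder", Kinds: []reflect.Kind{reflect.String}, Received: val}
	}
	s, err := vr.ReadString() // only BSON strings; other BSON types return an error
	if err != nil {
		return err
	}
	val.SetString(strings.TrimSpace(s))
	return nil
}

func main() {
	raw, err := bson.Marshal(bson.D{{Key: "name", Value: "  gopher  "}})
	if err != nil {
		panic(err)
	}

	reg := bson.NewRegistry()
	reg.RegisterKindDecoder(reflect.String, trimDecoder{})

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
	if err != nil {
		panic(err)
	}
	if err := dec.SetRegistry(reg); err != nil {
		panic(err)
	}

	var out struct {
		Name string `bson:"name"`
	}
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out.Name) // "gopher"
}
```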
func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { stringOpt := bsonoptions.MergeStringCodecOptions(opts...) return &StringCodec{*stringOpt.DecodeObjectIDAsHex} } // EncodeValue is the ValueEncoder for string types. -func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if val.Kind() != reflect.String { return ValueEncoderError{ Name: "StringEncodeValue", @@ -46,7 +66,7 @@ func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, va return vw.WriteString(val.String()) } -func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t.Kind() != reflect.String { return emptyValue, ValueDecoderError{ Name: "StringDecodeValue", @@ -71,6 +91,7 @@ func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t ref if sc.DecodeObjectIDAsHex { str = oid.Hex() } else { + // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. byteArray := [12]byte(oid) str = string(byteArray[:]) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index be3f2081..f8d9690c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -59,14 +59,58 @@ type Zeroer interface { } // StructCodec is the Codec used for struct values. +// +// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0. +// To configure the struct encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to omit zero-value structs when +// using the "omitempty" struct tag, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// OmitZeroStruct: true, +// }) +// +// See the deprecation notice for each field in StructCodec for the corresponding +// settings. type StructCodec struct { - cache map[reflect.Type]*structDescription - l sync.RWMutex - parser StructTagParser - DecodeZeroStruct bool - DecodeDeepZeroInline bool - EncodeOmitDefaultStruct bool - AllowUnexportedFields bool + cache sync.Map // map[reflect.Type]*structDescription + parser StructTagParser + + // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead. + DecodeZeroStruct bool + + // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. + DecodeDeepZeroInline bool + + // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g. 
+ // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag + // option is set. + // + // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead. + EncodeOmitDefaultStruct bool + + // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. + // + // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be + // supported in Go Driver 2.0. + AllowUnexportedFields bool + + // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is + // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The + // default value is true. + // + // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or + // options.BSONOptions.ErrorOnInlineDuplicates instead. OverwriteDuplicatedInlinedFields bool } @@ -74,6 +118,9 @@ var _ ValueEncoder = &StructCodec{} var _ ValueDecoder = &StructCodec{} // NewStructCodec returns a StructCodec that uses p for struct tag parsing. +// +// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See +// [StructCodec] for more details. func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { if p == nil { return nil, errors.New("a StructTagParser must be provided to NewStructCodec") @@ -82,7 +129,6 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) structOpt := bsonoptions.MergeStructCodecOptions(opts...) codec := &StructCodec{ - cache: make(map[reflect.Type]*structDescription), parser: p, } @@ -106,12 +152,12 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) } // EncodeValue handles encoding generic struct types. -func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Struct { return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} } - sd, err := sc.describeStruct(r.Registry, val.Type()) + sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) if err != nil { return err } @@ -131,13 +177,13 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r } } - desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(r, desc.encoder, rv) + desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) - if err != nil && err != errInvalidValue { + if err != nil && !errors.Is(err, errInvalidValue) { return err } - if err == errInvalidValue { + if errors.Is(err, errInvalidValue) { if desc.omitEmpty { continue } @@ -158,17 +204,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r encoder := desc.encoder - var isZero bool - rvInterface := rv.Interface() + var empty bool if cz, ok := encoder.(CodecZeroer); ok { - isZero = cz.IsTypeZero(rvInterface) + empty = cz.IsTypeZero(rv.Interface()) } else if rv.Kind() == reflect.Interface { - // sc.isZero will not treat an interface rv as an interface, so we need to check for the zero interface separately. - isZero = rv.IsNil() + // isEmpty will not treat an interface rv as an interface, so we need to check for the + // nil interface separately. 
+ empty = rv.IsNil() } else { - isZero = sc.isZero(rvInterface) + empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) } - if desc.omitEmpty && isZero { + if desc.omitEmpty && empty { continue } @@ -177,7 +223,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r return err } - ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize} + ectx := EncodeContext{ + Registry: ec.Registry, + MinSize: desc.minSize || ec.MinSize, + errorOnInlineDuplicates: ec.errorOnInlineDuplicates, + stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, + nilMapAsEmpty: ec.nilMapAsEmpty, + nilSliceAsEmpty: ec.nilSliceAsEmpty, + nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, + omitZeroStruct: ec.omitZeroStruct, + useJSONStructTags: ec.useJSONStructTags, + } err = encoder.EncodeValue(ectx, vw2, rv) if err != nil { return err @@ -191,15 +247,15 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r return exists } - return defaultMapCodec.mapEncodeValue(r, dw, rv, collisionFn) + return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) } return dw.WriteDocumentEnd() } func newDecodeError(key string, original error) error { - de, ok := original.(*DecodeError) - if !ok { + var de *DecodeError + if !errors.As(original, &de) { return &DecodeError{ keys: []string{key}, wrapped: original, @@ -213,7 +269,7 @@ func newDecodeError(key string, original error) error { // DecodeValue implements the Codec interface. // By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. // For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. -func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Kind() != reflect.Struct { return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} } @@ -238,12 +294,12 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) } - sd, err := sc.describeStruct(r.Registry, val.Type()) + sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) if err != nil { return err } - if sc.DecodeZeroStruct { + if sc.DecodeZeroStruct || dc.zeroStructs { val.Set(reflect.Zero(val.Type())) } if sc.DecodeDeepZeroInline && sd.inline { @@ -254,7 +310,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r var inlineMap reflect.Value if sd.inlineMap >= 0 { inlineMap = val.Field(sd.inlineMap) - decoder, err = r.LookupDecoder(inlineMap.Type().Elem()) + decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) if err != nil { return err } @@ -267,7 +323,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r for { name, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -298,8 +354,8 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r } elem := reflect.New(inlineMap.Type().Elem()).Elem() - r.Ancestor = inlineMap.Type() - err = decoder.DecodeValue(r, vr, elem) + dc.Ancestor = inlineMap.Type() + err = decoder.DecodeValue(dc, vr, elem) if err != nil { return err } @@ -326,7 +382,17 @@ func (sc 
*StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r } field = field.Addr() - dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate || r.Truncate} + dctx := DecodeContext{ + Registry: dc.Registry, + Truncate: fd.truncate || dc.Truncate, + defaultDocumentType: dc.defaultDocumentType, + binaryAsSlice: dc.binaryAsSlice, + useJSONStructTags: dc.useJSONStructTags, + useLocalTimeZone: dc.useLocalTimeZone, + zeroMaps: dc.zeroMaps, + zeroStructs: dc.zeroStructs, + } + if fd.decoder == nil { return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) } @@ -340,51 +406,35 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return nil } -func (sc *StructCodec) isZero(i interface{}) bool { - v := reflect.ValueOf(i) - - // check the value validity - if !v.IsValid() { - return true - } - - if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) { - return z.IsZero() +func isEmpty(v reflect.Value, omitZeroStruct bool) bool { + kind := v.Kind() + if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { + return v.Interface().(Zeroer).IsZero() } - - switch v.Kind() { + switch kind { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() case reflect.Struct: - if sc.EncodeOmitDefaultStruct { - vt := v.Type() - if vt == tTime { - return v.Interface().(time.Time).IsZero() + if !omitZeroStruct { + return false + } + vt := v.Type() + if vt == tTime { + return v.Interface().(time.Time).IsZero() + } + numField := vt.NumField() + for i := 0; i < numField; i++ { + ff := vt.Field(i) + if ff.PkgPath != "" && !ff.Anonymous { + continue // Private field } - for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { - continue // Private field - } - fld := v.Field(i) - if !sc.isZero(fld.Interface()) { - return false - } + if !isEmpty(v.Field(i), omitZeroStruct) { + return false } - return true } + return true } - - return false + return !v.IsValid() || v.IsZero() } type structDescription struct { @@ -435,16 +485,35 @@ func (bi byIndex) Less(i, j int) bool { return len(bi[i].inline) < len(bi[j].inline) } -func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) { +func (sc *StructCodec) describeStruct( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { // We need to analyze the struct, including getting the tags, collecting // information about inlining, and create a map of the field name to the field. - sc.l.RLock() - ds, exists := sc.cache[t] - sc.l.RUnlock() - if exists { - return ds, nil + if v, ok := sc.cache.Load(t); ok { + return v.(*structDescription), nil } + // TODO(charlie): Only describe the struct once when called + // concurrently with the same type. 
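The isEmpty rewrite above consults the Zeroer interface first (via the tZeroer assertion added to types.go later in this patch), so a type can decide its own emptiness for `omitempty` without the OmitZeroStruct option. A minimal sketch, with illustrative types:

```go
package main

import "fmt"

// Interval reports itself empty whenever it spans nothing, even though its
// fields may individually be non-zero.
type Interval struct {
	Lo, Hi int
}

func (iv Interval) IsZero() bool { return iv.Lo >= iv.Hi }

type Doc struct {
	// With the Zeroer hook, `omitempty` drops Window when IsZero reports true.
	Window Interval `bson:"window,omitempty"`
}

func main() {
	fmt.Println(Interval{Lo: 3, Hi: 3}.IsZero()) // true: omitted under omitempty
	fmt.Println(Interval{Lo: 1, Hi: 5}.IsZero()) // false: marshaled as usual
}
```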
+ ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates) + if err != nil { + return nil, err + } + if v, loaded := sc.cache.LoadOrStore(t, ds); loaded { + ds = v.(*structDescription) + } + return ds, nil +} +func (sc *StructCodec) describeStructSlow( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { numFields := t.NumField() sd := &structDescription{ fm: make(map[string]fieldDescription, numFields), @@ -477,7 +546,14 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr decoder: decoder, } - stags, err := sc.parser.ParseStructTags(sf) + var stags StructTags + // If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser + // instead of the parser defined on the codec. + if useJSONStructTags { + stags, err = JSONFallbackStructTagParser.ParseStructTags(sf) + } else { + stags, err = sc.parser.ParseStructTags(sf) + } if err != nil { return nil, err } @@ -507,7 +583,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr } fallthrough case reflect.Struct: - inlinesf, err := sc.describeStruct(r, sfType) + inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates) if err != nil { return nil, err } @@ -559,7 +635,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr continue } dominant, ok := dominantField(fields[i : i+advance]) - if !ok || !sc.OverwriteDuplicatedInlinedFields { + if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates { return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) } sd.fl = append(sd.fl, dominant) @@ -568,10 +644,6 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr sort.Sort(byIndex(sd.fl)) - sc.l.Lock() - sc.cache[t] = sd - sc.l.Unlock() - return sd, nil } @@ -629,21 +701,21 @@ func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { // DeepZero returns recursive zero object func deepZero(st reflect.Type) (result reflect.Value) { - result = reflect.Indirect(reflect.New(st)) - - if result.Kind() == reflect.Struct { - for i := 0; i < result.NumField(); i++ { - if f := result.Field(i); f.Kind() == reflect.Ptr { - if f.CanInterface() { - if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem()))) - } + if st.Kind() == reflect.Struct { + numField := st.NumField() + for i := 0; i < numField; i++ { + if result == emptyValue { + result = reflect.Indirect(reflect.New(st)) + } + f := result.Field(i) + if f.CanInterface() { + if f.Type().Kind() == reflect.Struct { + result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem()))) } } } } - - return + return result } // recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go index 62708c5c..18d85bfb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -12,12 +12,16 @@ import ( ) // StructTagParser returns the struct tags for a given struct field. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. 
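describeStruct now threads a useJSONStructTags flag through to JSONFallbackStructTagParser. A sketch of reaching that path through the bson.Decoder configuration the deprecation notes recommend; the User type and field names are illustrative:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

type User struct {
	// The json tag differs from the lowercased field name, so only the
	// JSON-fallback tag parser maps this field to the "full_name" key.
	Name string `json:"full_name"`
}

func main() {
	raw, err := bson.Marshal(bson.D{{Key: "full_name", Value: "gopher"}})
	if err != nil {
		panic(err)
	}

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
	if err != nil {
		panic(err)
	}
	dec.UseJSONStructTags() // fall back to `json` tags where `bson` tags are absent

	var u User
	if err := dec.Decode(&u); err != nil {
		panic(err)
	}
	fmt.Println(u.Name) // gopher
}
```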
type StructTagParser interface { ParseStructTags(reflect.StructField) (StructTags, error) } // StructTagParserFunc is an adapter that allows a generic function to be used // as a StructTagParser. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. type StructTagParserFunc func(reflect.StructField) (StructTags, error) // ParseStructTags implements the StructTagParser interface. @@ -50,7 +54,7 @@ func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructT // Skip This struct field should be skipped. This is usually denoted by parsing a "-" // for the name. // -// TODO(skriptble): Add tags for undefined as nil and for null as nil. +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. type StructTags struct { Name string OmitEmpty bool @@ -85,6 +89,8 @@ type StructTags struct { // A struct tag either consisting entirely of '-' or with a bson key with a // value consisting entirely of '-' will return a StructTags with Skip true and // the remaining fields will be their default values. +// +// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0. var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { key := strings.ToLower(sf.Name) tag, ok := sf.Tag.Lookup("bson") @@ -125,6 +131,9 @@ func parseTags(key string, tag string) (StructTags, error) { // JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser // but will also fallback to parsing the json tag instead on a field where the // bson tag isn't available. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and +// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { key := strings.ToLower(sf.Name) tag, ok := sf.Tag.Lookup("bson") diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go index ec7e30f7..22fb762c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -22,18 +22,42 @@ const ( ) // TimeCodec is the Codec used for time.Time values. +// +// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0. +// To configure the time.Time encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to ..., use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// UseLocalTimeZone: true, +// }) +// +// See the deprecation notice for each field in TimeCodec for the corresponding +// settings. type TimeCodec struct { + // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. + // + // Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone + // instead. UseLocalTimeZone bool } var ( defaultTimeCodec = NewTimeCodec() - _ ValueCodec = defaultTimeCodec + // Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. 
map, slice, etc) to set individual values in a collection. _ typeDecoder = defaultTimeCodec ) // NewTimeCodec returns a TimeCodec with options opts. +// +// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See +// [TimeCodec] for more details. func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) @@ -95,7 +119,7 @@ func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refle return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) } - if !tc.UseLocalTimeZone { + if !tc.UseLocalTimeZone && !dc.useLocalTimeZone { timeVal = timeVal.UTC() } return reflect.ValueOf(timeVal), nil @@ -117,7 +141,7 @@ func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val re } // EncodeValue is the ValueEncoderFunc for time.TIme. -func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTime { return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go index 07f4b70e..6ade17b7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go @@ -34,6 +34,7 @@ var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() +var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() var tBinary = reflect.TypeOf(primitive.Binary{}) var tUndefined = reflect.TypeOf(primitive.Undefined{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index 0b21ce99..85254727 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -17,18 +17,43 @@ import ( ) // UIntCodec is the Codec used for uint values. +// +// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To +// configure the uint encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal Go uint values as the +// minimum BSON int size that can represent the value, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// IntMinSize: true, +// }) +// +// See the deprecation notice for each field in UIntCodec for the corresponding +// settings. type UIntCodec struct { + // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the + // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead. 
EncodeToMinSize bool } var ( defaultUIntCodec = NewUIntCodec() - _ ValueCodec = defaultUIntCodec + // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. _ typeDecoder = defaultUIntCodec ) // NewUIntCodec returns a UIntCodec with options opts. +// +// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See +// [UIntCodec] for more details. func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go index b1256a4d..996bd171 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type ByteSliceCodecOptions struct { EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. } // ByteSliceCodec creates a new *ByteSliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func ByteSliceCodec() *ByteSliceCodecOptions { return &ByteSliceCodecOptions{} } // SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { bs.EncodeNilAsEmpty = &b return bs } // MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { bs := ByteSliceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go index 6caaa000..f522c7e0 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type EmptyInterfaceCodecOptions struct { DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. 
} // EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { return &EmptyInterfaceCodecOptions{} } // SetDecodeBinaryAsSlice specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { e.DecodeBinaryAsSlice = &b return e } // MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { e := EmptyInterfaceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go index 7a6a880b..a7a7c1d9 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go @@ -7,6 +7,9 @@ package bsonoptions // MapCodecOptions represents all possible options for map encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type MapCodecOptions struct { DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. @@ -19,17 +22,24 @@ type MapCodecOptions struct { } // MapCodec creates a new *MapCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func MapCodec() *MapCodecOptions { return &MapCodecOptions{} } // SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { t.DecodeZerosMap = &b return t } // SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { t.EncodeNilAsEmpty = &b return t @@ -40,12 +50,17 @@ func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { // type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with // fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer // will override TextMarshaler/TextUnmarshaler. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. 
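The Merge*CodecOptions helpers deprecated throughout these files all follow the same last-one-wins convention: later non-nil pointer fields overwrite earlier ones. A stripped-down sketch of that convention with an illustrative options type:

```go
package main

import "fmt"

type codecOptions struct {
	Flag *bool // nil means "unset", matching the *bool option fields above
}

func merge(opts ...*codecOptions) *codecOptions {
	merged := &codecOptions{}
	for _, opt := range opts {
		if opt == nil {
			continue
		}
		if opt.Flag != nil {
			merged.Flag = opt.Flag // the last non-nil setting wins
		}
	}
	return merged
}

func main() {
	t, f := true, false
	merged := merge(&codecOptions{Flag: &f}, nil, &codecOptions{Flag: &t})
	fmt.Println(*merged.Flag) // true
}
```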
func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { t.EncodeKeysWithStringer = &b return t } // MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { s := MapCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go index ef965e4b..3c1e4f35 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // SliceCodecOptions represents all possible options for slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type SliceCodecOptions struct { EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. } // SliceCodec creates a new *SliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func SliceCodec() *SliceCodecOptions { return &SliceCodecOptions{} } // SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { s.EncodeNilAsEmpty = &b return s } // MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { s := SliceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go index 65964f42..f8b76f99 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go @@ -9,23 +9,34 @@ package bsonoptions var defaultDecodeOIDAsHex = true // StringCodecOptions represents all possible options for string encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type StringCodecOptions struct { DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. } // StringCodec creates a new *StringCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func StringCodec() *StringCodecOptions { return &StringCodecOptions{} } // SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made // from the raw object ID bytes will be used. Defaults to true. 
+// +// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { t.DecodeObjectIDAsHex = &b return t } // MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { s := &StringCodecOptions{&defaultDecodeOIDAsHex} for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go index 78d1dd86..1cbfa32e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go @@ -9,6 +9,9 @@ package bsonoptions var defaultOverwriteDuplicatedInlinedFields = true // StructCodecOptions represents all possible options for struct encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type StructCodecOptions struct { DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. @@ -18,17 +21,24 @@ type StructCodecOptions struct { } // StructCodec creates a new *StructCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func StructCodec() *StructCodecOptions { return &StructCodecOptions{} } // SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions { t.DecodeZeroStruct = &b return t } // SetDecodeDeepZeroInline specifies if structs should be zeroed before decoding into them. Defaults to false. +// +// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions { t.DecodeDeepZeroInline = &b return t @@ -36,6 +46,8 @@ func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions // SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all // its values set to their default value. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions { t.EncodeOmitDefaultStruct = &b return t @@ -45,18 +57,26 @@ func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOpti // same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when // encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if // there are duplicate keys after the struct is inlined. Defaults to true. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions { t.OverwriteDuplicatedInlinedFields = &b return t } // SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. +// +// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be +// supported in Go Driver 2.0. func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { t.AllowUnexportedFields = &b return t } // MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { s := &StructCodecOptions{ OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go index 13496d12..3f38433d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // TimeCodecOptions represents all possible options for time.Time encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type TimeCodecOptions struct { UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. } // TimeCodec creates a new *TimeCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func TimeCodec() *TimeCodecOptions { return &TimeCodecOptions{} } // SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { t.UseLocalTimeZone = &b return t } // MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { t := TimeCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go index e08b7f19..5091e4d9 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // UIntCodecOptions represents all possible options for uint encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type UIntCodecOptions struct { EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. 
} // UIntCodec creates a new *UIntCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func UIntCodec() *UIntCodecOptions { return &UIntCodecOptions{} } // SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead. func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { u.EncodeToMinSize = &b return u } // MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { u := UIntCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go index 5cdf6460..1e25570b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -7,6 +7,7 @@ package bsonrw import ( + "errors" "fmt" "io" @@ -17,20 +18,32 @@ import ( // Copier is a type that allows copying between ValueReaders, ValueWriters, and // []byte values. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. type Copier struct{} // NewCopier creates a new copier with the given registry. If a nil registry is provided // a default registry is used. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func NewCopier() Copier { return Copier{} } // CopyDocument handles copying a document from src to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func CopyDocument(dst ValueWriter, src ValueReader) error { return Copier{}.CopyDocument(dst, src) } // CopyDocument handles copying one document from the src to the dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { dr, err := src.ReadDocument() if err != nil { @@ -47,6 +60,9 @@ func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { // CopyArrayFromBytes copies the values from a BSON array represented as a // []byte to a ValueWriter. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { aw, err := dst.WriteArray() if err != nil { @@ -63,6 +79,9 @@ func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { // CopyDocumentFromBytes copies the values from a BSON document represented as a // []byte to a ValueWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. 
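The copier.go hunks below swap `defer vwPool.Put(vw)` for `defer putValueWriter(vw)`. A plausible reading, sketched here as an assumption since the helper's body is not shown in this diff, is a put-side wrapper that clears references before returning the object to the sync.Pool; the valueWriter fields are illustrative, not the driver's actual layout:

```go
package main

import (
	"io"
	"sync"
)

// valueWriter stands in for the driver's pooled writer.
type valueWriter struct {
	w   io.Writer
	buf []byte
}

var vwPool = sync.Pool{New: func() interface{} { return new(valueWriter) }}

// putValueWriter clears the destination writer before pooling so a pooled
// object does not keep the caller's writer reachable.
func putValueWriter(vw *valueWriter) {
	if vw == nil {
		return
	}
	vw.w = nil
	vwPool.Put(vw)
}

func main() {
	vw := vwPool.Get().(*valueWriter)
	vw.buf = vw.buf[:0]
	// ... write into vw.buf ...
	putValueWriter(vw)
}
```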
func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { dw, err := dst.WriteDocument() if err != nil { @@ -81,6 +100,9 @@ type writeElementFn func(key string) (ValueWriter, error) // CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an // ArrayWriter. +// +// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go +// Driver 2.0. func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { wef := func(_ string) (ValueWriter, error) { return dst.WriteArrayElement() @@ -91,6 +113,9 @@ func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { // CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a // DocumentWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { wef := func(key string) (ValueWriter, error) { return dst.WriteDocumentElement(key) @@ -100,7 +125,7 @@ func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error } func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create errors types here. Anything thats a tag should be a property. + // TODO(skriptble): Create errors types here. Anything that is a tag should be a property. length, rem, ok := bsoncore.ReadLength(src) if !ok { return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) @@ -150,12 +175,18 @@ func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { // CopyDocumentToBytes copies an entire document from the ValueReader and // returns it as bytes. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { return c.AppendDocumentBytes(nil, src) } // AppendDocumentBytes functions the same as CopyDocumentToBytes, but will // append the result to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { if br, ok := src.(BytesReader); ok { _, dst, err := br.ReadValueBytes(dst) @@ -163,7 +194,7 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) vw.reset(dst) @@ -173,6 +204,9 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) } // AppendArrayBytes copies an array from the ValueReader to dst. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { if br, ok := src.(BytesReader); ok { _, dst, err := br.ReadValueBytes(dst) @@ -180,7 +214,7 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) vw.reset(dst) @@ -190,6 +224,8 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { } // CopyValueFromBytes will write the value represtend by t and src to dst. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead. func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { if wvb, ok := dst.(BytesWriter); ok { return wvb.WriteValueBytes(t, src) @@ -206,19 +242,24 @@ func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) // CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a // []byte. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead. func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { return c.AppendValueBytes(nil, src) } // AppendValueBytes functions the same as CopyValueToBytes, but will append the // result to dst. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { if br, ok := src.(BytesReader); ok { return br.ReadValueBytes(dst) } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) start := len(dst) @@ -234,6 +275,9 @@ func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, [] } // CopyValue will copy a single value from src to dst. +// +// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { var err error switch src.Type() { @@ -399,7 +443,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { for { vr, err := ar.ReadValue() - if err == ErrEOA { + if errors.Is(err, ErrEOA) { break } if err != nil { @@ -423,7 +467,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { for { key, vr, err := dr.ReadElement() - if err == ErrEOD { + if errors.Is(err, ErrEOD) { break } if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go index 54c76bf7..bb52a0ec 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -313,7 +313,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { // convert hex to bytes bytes, err := hex.DecodeString(uuidNoHyphens) if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err) + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err) } ejp.advanceState() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go index 35832d73..59ddfc44 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -7,6 +7,7 @@ package bsonrw import ( + "errors" "fmt" "io" "sync" @@ -16,11 +17,15 @@ import ( ) // ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. type ExtJSONValueReaderPool struct { pool sync.Pool } // NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. 
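The Copier deprecations above name bson.MarshalValue and bson.UnmarshalValue as the replacements for CopyValueToBytes and CopyValueFromBytes. A short round trip using those two functions:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// Marshal a single value to its BSON type and raw bytes...
	t, data, err := bson.MarshalValue("hello")
	if err != nil {
		panic(err)
	}
	fmt.Println(t, len(data))

	// ...and round-trip it back.
	var s string
	if err := bson.UnmarshalValue(t, data, &s); err != nil {
		panic(err)
	}
	fmt.Println(s) // hello
}
```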
func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { return &ExtJSONValueReaderPool{ pool: sync.Pool{ @@ -32,6 +37,8 @@ func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { } // Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { vr := bvrp.pool.Get().(*extJSONValueReader) return vr.reset(r, canonical) @@ -39,6 +46,8 @@ func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReade // Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing // is inserted into the pool and ok will be false. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { bvr, ok := vr.(*extJSONValueReader) if !ok { @@ -605,7 +614,7 @@ func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { name, t, err := ejvr.p.readKey() if err != nil { - if err == ErrEOD { + if errors.Is(err, ErrEOD) { if ejvr.stack[ejvr.frame].mode == mCodeWithScope { _, err := ejvr.p.peekType() if err != nil { @@ -632,7 +641,7 @@ func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { t, err := ejvr.p.peekType() if err != nil { - if err == ErrEOA { + if errors.Is(err, ErrEOA) { ejvr.pop() } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go index 99ed524b..bb930316 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go @@ -23,11 +23,15 @@ import ( ) // ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. type ExtJSONValueWriterPool struct { pool sync.Pool } // NewExtJSONValueWriterPool creates a new pool for ValueWriter instances that write to ExtJSON. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool { return &ExtJSONValueWriterPool{ pool: sync.Pool{ @@ -39,6 +43,8 @@ func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool { } // Get retrieves a ExtJSON ValueWriter from the pool and resets it to use w as the destination. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) ValueWriter { vw := bvwp.pool.Get().(*extJSONValueWriter) if writer, ok := w.(*SliceWriter); ok { @@ -53,6 +59,8 @@ func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) // Put inserts a ValueWriter into the pool. If the ValueWriter is not a ExtJSON ValueWriter, nothing // happens and ok will be false. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *ExtJSONValueWriterPool) Put(vw ValueWriter) (ok bool) { bvw, ok := vw.(*extJSONValueWriter) if !ok { @@ -80,6 +88,7 @@ type extJSONValueWriter struct { frame int64 canonical bool escapeHTML bool + newlines bool } // NewExtJSONValueWriter creates a ValueWriter that writes Extended JSON to w. 
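A recurring change in these hunks replaces sentinel comparisons such as `err == ErrEOD` with errors.Is, so wrapped errors still terminate the read loop. A self-contained sketch of the pattern with an illustrative reader type:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrEOD = errors.New("end of document")

type reader struct{ elems []string }

func (r *reader) ReadElement() (string, error) {
	if len(r.elems) == 0 {
		// Wrapping is why errors.Is matters: a plain == check would miss this.
		return "", fmt.Errorf("document drained: %w", ErrEOD)
	}
	e := r.elems[0]
	r.elems = r.elems[1:]
	return e, nil
}

func main() {
	r := &reader{elems: []string{"a", "b"}}
	for {
		key, err := r.ReadElement()
		if errors.Is(err, ErrEOD) {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(key)
	}
}
```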
@@ -88,10 +97,13 @@ func NewExtJSONValueWriter(w io.Writer, canonical, escapeHTML bool) (ValueWriter return nil, errNilWriter } - return newExtJSONWriter(w, canonical, escapeHTML), nil + // Enable newlines for all Extended JSON value writers created by NewExtJSONValueWriter. We + // expect these value writers to be used with an Encoder, which should add newlines after + // encoded Extended JSON documents. + return newExtJSONWriter(w, canonical, escapeHTML, true), nil } -func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWriter { +func newExtJSONWriter(w io.Writer, canonical, escapeHTML, newlines bool) *extJSONValueWriter { stack := make([]ejvwState, 1, 5) stack[0] = ejvwState{mode: mTopLevel} @@ -101,6 +113,7 @@ func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWrit stack: stack, canonical: canonical, escapeHTML: escapeHTML, + newlines: newlines, } } @@ -564,6 +577,12 @@ func (ejvw *extJSONValueWriter) WriteDocumentEnd() error { case mDocument: ejvw.buf = append(ejvw.buf, ',') case mTopLevel: + // If the value writer has newlines enabled, end top-level documents with a newline so that + // multiple documents encoded to the same writer are separated by newlines. That matches the + // Go json.Encoder behavior and also works with bsonrw.NewExtJSONValueReader. + if ejvw.newlines { + ejvw.buf = append(ejvw.buf, '\n') + } if ejvw.w != nil { if _, err := ejvw.w.Write(ejvw.buf); err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go index cd4843a3..43f3e4f3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go @@ -58,7 +58,7 @@ func (js *jsonScanner) nextToken() (*jsonToken, error) { c, err = js.readNextByte() } - if err == io.EOF { + if errors.Is(err, io.EOF) { return &jsonToken{t: jttEOF}, nil } else if err != nil { return nil, err @@ -198,7 +198,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { for { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -209,7 +209,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { case '\\': c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -248,7 +248,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { if utf16.IsSurrogate(rn) { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -264,7 +264,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -325,17 +325,17 @@ func (js *jsonScanner) scanLiteral(first byte) (*jsonToken, error) { c5, err := js.readNextByte() - if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || err == io.EOF) { + if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttBool, v: true, p: p}, nil - } else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || err == io.EOF) { + } else if bytes.Equal([]byte("null"), 
lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttNull, v: nil, p: p}, nil } else if bytes.Equal([]byte("fals"), lit) { if c5 == 'e' { c5, err = js.readNextByte() - if isValueTerminator(c5) || err == io.EOF { + if isValueTerminator(c5) || errors.Is(err, io.EOF) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttBool, v: false, p: p}, nil } @@ -384,7 +384,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { for { c, err = js.readNextByte() - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return nil, err } @@ -413,7 +413,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else { s = nssInvalid @@ -430,7 +430,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawIntegerDigits @@ -455,7 +455,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawFractionDigits @@ -490,7 +490,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawExponentDigits diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go index 0b8fa28d..324b10b6 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go @@ -58,6 +58,8 @@ type ValueReader interface { // types that implement ValueReader may also implement this interface. // // The bytes of the value will be appended to dst. +// +// Deprecated: BytesReader will not be supported in Go Driver 2.0. type BytesReader interface { ReadValueBytes(dst []byte) (bsontype.Type, []byte, error) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index ef5d837c..a242bb57 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -28,11 +28,15 @@ var vrPool = sync.Pool{ } // BSONValueReaderPool is a pool for ValueReaders that read BSON. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. type BSONValueReaderPool struct { pool sync.Pool } // NewBSONValueReaderPool instantiates a new BSONValueReaderPool. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. func NewBSONValueReaderPool() *BSONValueReaderPool { return &BSONValueReaderPool{ pool: sync.Pool{ @@ -44,6 +48,8 @@ func NewBSONValueReaderPool() *BSONValueReaderPool { } // Get retrieves a ValueReader from the pool and uses src as the underlying BSON. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. 
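The BSON reader pools get the same treatment, so callers construct a ValueReader per document instead. A sketch assuming bsonrw.NewBSONDocumentReader, the existing slice-based constructor, together with the bson.NewDecoder path that appears later in this diff:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	data, err := bson.Marshal(bson.D{{Key: "name", Value: "podman"}})
	if err != nil {
		panic(err)
	}

	// NewBSONDocumentReader replaces BSONValueReaderPool.Get: it wraps an
	// existing BSON document in a ValueReader with no pooling involved.
	vr := bsonrw.NewBSONDocumentReader(data)

	dec, err := bson.NewDecoder(vr)
	if err != nil {
		panic(err)
	}

	var out bson.M
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["name"]) // podman
}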
func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader { vr := bvrp.pool.Get().(*valueReader) vr.reset(src) @@ -52,6 +58,8 @@ func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader { // Put inserts a ValueReader into the pool. If the ValueReader is not a BSON ValueReader nothing // is inserted into the pool and ok will be false. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *BSONValueReaderPool) Put(vr ValueReader) (ok bool) { bvr, ok := vr.(*valueReader) if !ok { @@ -731,8 +739,7 @@ func (vr *valueReader) ReadValue() (ValueReader, error) { return nil, ErrEOA } - _, err = vr.readCString() - if err != nil { + if err := vr.skipCString(); err != nil { return nil, err } @@ -786,6 +793,15 @@ func (vr *valueReader) readByte() (byte, error) { return vr.d[vr.offset-1], nil } +func (vr *valueReader) skipCString() error { + idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) + if idx < 0 { + return io.EOF + } + vr.offset += int64(idx) + 1 + return nil +} + func (vr *valueReader) readCString() (string, error) { idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) if idx < 0 { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go index f95a08af..311518a8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go @@ -28,12 +28,23 @@ var vwPool = sync.Pool{ }, } +func putValueWriter(vw *valueWriter) { + if vw != nil { + vw.w = nil // don't leak the writer + vwPool.Put(vw) + } +} + // BSONValueWriterPool is a pool for BSON ValueWriters. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. type BSONValueWriterPool struct { pool sync.Pool } // NewBSONValueWriterPool creates a new pool for ValueWriter instances that write to BSON. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func NewBSONValueWriterPool() *BSONValueWriterPool { return &BSONValueWriterPool{ pool: sync.Pool{ @@ -45,6 +56,8 @@ func NewBSONValueWriterPool() *BSONValueWriterPool { } // Get retrieves a BSON ValueWriter from the pool and resets it to use w as the destination. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter { vw := bvwp.pool.Get().(*valueWriter) @@ -56,6 +69,8 @@ func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter { } // GetAtModeElement retrieves a ValueWriterFlusher from the pool and resets it to use w as the destination. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlusher { vw := bvwp.Get(w).(*valueWriter) vw.push(mElement) @@ -64,6 +79,8 @@ func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlushe // Put inserts a ValueWriter into the pool. If the ValueWriter is not a BSON ValueWriter, nothing // happens and ok will be false. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. 
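skipCString, added above, lets valueReader.ReadValue step over an array's numeric keys without allocating the string that readCString would return. The core of that optimization, restated as a standalone sketch (the helper name and sample bytes are mine):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// skipCString advances past one NUL-terminated string in buf starting at
// offset and returns the new offset, mirroring the allocation-free skip
// the valueReader change introduces.
func skipCString(buf []byte, offset int) (int, error) {
	idx := bytes.IndexByte(buf[offset:], 0x00)
	if idx < 0 {
		return 0, io.EOF
	}
	return offset + idx + 1, nil
}

func main() {
	keys := []byte("0\x001\x002\x00") // BSON array keys "0", "1", "2"
	off := 0
	for off < len(keys) {
		next, err := skipCString(keys, off)
		if err != nil {
			panic(err)
		}
		fmt.Printf("skipped cstring in [%d, %d)\n", off, next)
		off = next
	}
}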
func (bvwp *BSONValueWriterPool) Put(vw ValueWriter) (ok bool) { bvw, ok := vw.(*valueWriter) if !ok { @@ -139,32 +156,21 @@ type valueWriter struct { } func (vw *valueWriter) advanceFrame() { - if vw.frame+1 >= int64(len(vw.stack)) { // We need to grow the stack - length := len(vw.stack) - if length+1 >= cap(vw.stack) { - // double it - buf := make([]vwState, 2*cap(vw.stack)+1) - copy(buf, vw.stack) - vw.stack = buf - } - vw.stack = vw.stack[:length+1] - } vw.frame++ + if vw.frame >= int64(len(vw.stack)) { + vw.stack = append(vw.stack, vwState{}) + } } func (vw *valueWriter) push(m mode) { vw.advanceFrame() // Clean the stack - vw.stack[vw.frame].mode = m - vw.stack[vw.frame].key = "" - vw.stack[vw.frame].arrkey = 0 - vw.stack[vw.frame].start = 0 + vw.stack[vw.frame] = vwState{mode: m} - vw.stack[vw.frame].mode = m switch m { case mDocument, mArray, mCodeWithScope: - vw.reserveLength() + vw.reserveLength() // WARN: this is not needed } } @@ -203,6 +209,7 @@ func newValueWriter(w io.Writer) *valueWriter { return vw } +// TODO: only used in tests func newValueWriterFromSlice(buf []byte) *valueWriter { vw := new(valueWriter) stack := make([]vwState, 1, 5) @@ -239,17 +246,16 @@ func (vw *valueWriter) invalidTransitionError(destination mode, name string, mod } func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error { - switch vw.stack[vw.frame].mode { + frame := &vw.stack[vw.frame] + switch frame.mode { case mElement: - key := vw.stack[vw.frame].key + key := frame.key if !isValidCString(key) { return errors.New("BSON element key cannot contain null bytes") } - - vw.buf = bsoncore.AppendHeader(vw.buf, t, key) + vw.appendHeader(t, key) case mValue: - // TODO: Do this with a cache of the first 1000 or so array keys. - vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey)) + vw.appendIntHeader(t, frame.arrkey) default: modes := []mode{mElement, mValue} if addmodes != nil { @@ -591,9 +597,11 @@ func (vw *valueWriter) writeLength() error { if length > maxSize { return errMaxDocumentSizeExceeded{size: int64(len(vw.buf))} } - length = length - int(vw.stack[vw.frame].start) - start := vw.stack[vw.frame].start + frame := &vw.stack[vw.frame] + length = length - int(frame.start) + start := frame.start + _ = vw.buf[start+3] // BCE vw.buf[start+0] = byte(length) vw.buf[start+1] = byte(length >> 8) vw.buf[start+2] = byte(length >> 16) @@ -602,5 +610,31 @@ func (vw *valueWriter) writeLength() error { } func isValidCString(cs string) bool { - return !strings.ContainsRune(cs, '\x00') + // Disallow the zero byte in a cstring because the zero byte is used as the + // terminating character. + // + // It's safe to check bytes instead of runes because all multibyte UTF-8 + // code points start with (binary) 11xxxxxx or 10xxxxxx, so 00000000 (i.e. + // 0) will never be part of a multibyte UTF-8 code point. This logic is the + // same as the "r < utf8.RuneSelf" case in strings.IndexRune but can be + // inlined. + // + // https://cs.opensource.google/go/go/+/refs/tags/go1.21.1:src/strings/strings.go;l=127 + return strings.IndexByte(cs, 0) == -1 +} + +// appendHeader is the same as bsoncore.AppendHeader but does not check if the +// key is a valid C string since the caller has already checked for that. +// +// The caller of this function must check if key is a valid C string. +func (vw *valueWriter) appendHeader(t bsontype.Type, key string) { + vw.buf = bsoncore.AppendType(vw.buf, t) + vw.buf = append(vw.buf, key...) 
+ vw.buf = append(vw.buf, 0x00) +} + +func (vw *valueWriter) appendIntHeader(t bsontype.Type, key int) { + vw.buf = bsoncore.AppendType(vw.buf, t) + vw.buf = strconv.AppendInt(vw.buf, int64(key), 10) + vw.buf = append(vw.buf, 0x00) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go index dff65f87..628f4529 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go @@ -56,6 +56,8 @@ type ValueWriter interface { } // ValueWriterFlusher is a superset of ValueWriter that exposes functionality to flush to the underlying buffer. +// +// Deprecated: ValueWriterFlusher will not be supported in Go Driver 2.0. type ValueWriterFlusher interface { ValueWriter Flush() error @@ -64,13 +66,20 @@ type ValueWriterFlusher interface { // BytesWriter is the interface used to write BSON bytes to a ValueWriter. // This interface is meant to be a superset of ValueWriter, so that types that // implement ValueWriter may also implement this interface. +// +// Deprecated: BytesWriter will not be supported in Go Driver 2.0. type BytesWriter interface { WriteValueBytes(t bsontype.Type, b []byte) error } // SliceWriter allows a pointer to a slice of bytes to be used as an io.Writer. +// +// Deprecated: SliceWriter will not be supported in Go Driver 2.0. type SliceWriter []byte +// Write writes the bytes to the underlying slice. +// +// Deprecated: SliceWriter will not be supported in Go Driver 2.0. func (sw *SliceWriter) Write(p []byte) (int, error) { written := len(p) *sw = append(*sw, p...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go index 7c91ae51..255d9909 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go @@ -8,7 +8,9 @@ // a stringifier for the Type to enable easier debugging when working with BSON. package bsontype // import "go.mongodb.org/mongo-driver/bson/bsontype" -// These constants uniquely refer to each BSON type. +// BSON element types as described in https://bsonspec.org/spec.html. +// +// Deprecated: Use bson.Type* constants instead. const ( Double Type = 0x01 String Type = 0x02 @@ -31,7 +33,12 @@ const ( Decimal128 Type = 0x13 MinKey Type = 0xFF MaxKey Type = 0x7F +) +// BSON binary element subtypes as described in https://bsonspec.org/spec.html. +// +// Deprecated: Use the bson.TypeBinary* constants instead. +const ( BinaryGeneric byte = 0x00 BinaryFunction byte = 0x01 BinaryBinaryOld byte = 0x02 @@ -40,6 +47,7 @@ const ( BinaryMD5 byte = 0x05 BinaryEncrypted byte = 0x06 BinaryColumn byte = 0x07 + BinarySensitive byte = 0x08 BinaryUserDefined byte = 0x80 ) @@ -95,3 +103,14 @@ func (bt Type) String() string { return "invalid" } } + +// IsValid will return true if the Type is valid. 
+func (bt Type) IsValid() bool { + switch bt { + case Double, String, EmbeddedDocument, Array, Binary, Undefined, ObjectID, Boolean, DateTime, Null, Regex, + DBPointer, JavaScript, Symbol, CodeWithScope, Int32, Timestamp, Int64, Decimal128, MinKey, MaxKey: + return true + default: + return false + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go index 6e189fa5..eac74cd3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go @@ -38,6 +38,12 @@ type Decoder struct { // (*Decoder).SetContext. defaultDocumentM bool defaultDocumentD bool + + binaryAsSlice bool + useJSONStructTags bool + useLocalTimeZone bool + zeroMaps bool + zeroStructs bool } // NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr. @@ -53,6 +59,9 @@ func NewDecoder(vr bsonrw.ValueReader) (*Decoder, error) { } // NewDecoderWithContext returns a new decoder that uses DecodeContext dc to read from vr. +// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal +// behavior instead. func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (*Decoder, error) { if dc.Registry == nil { dc.Registry = DefaultRegistry @@ -70,8 +79,7 @@ func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (* // Decode reads the next BSON document from the stream and decodes it into the // value pointed to by val. // -// The documentation for Unmarshal contains details about of BSON into a Go -// value. +// See [Unmarshal] for details about BSON unmarshaling behavior. func (d *Decoder) Decode(val interface{}) error { if unmarshaler, ok := val.(Unmarshaler); ok { // TODO(skriptble): Reuse a []byte here and use the AppendDocumentBytes method. @@ -100,42 +108,101 @@ func (d *Decoder) Decode(val interface{}) error { if err != nil { return err } + if d.defaultDocumentM { d.dc.DefaultDocumentM() } if d.defaultDocumentD { d.dc.DefaultDocumentD() } + if d.binaryAsSlice { + d.dc.BinaryAsSlice() + } + if d.useJSONStructTags { + d.dc.UseJSONStructTags() + } + if d.useLocalTimeZone { + d.dc.UseLocalTimeZone() + } + if d.zeroMaps { + d.dc.ZeroMaps() + } + if d.zeroStructs { + d.dc.ZeroStructs() + } + return decoder.DecodeValue(d.dc, d.vr, rval) } // Reset will reset the state of the decoder, using the same *DecodeContext used in // the original construction but using vr for reading. func (d *Decoder) Reset(vr bsonrw.ValueReader) error { + // TODO:(GODRIVER-2719): Remove error return value. d.vr = vr return nil } // SetRegistry replaces the current registry of the decoder with r. func (d *Decoder) SetRegistry(r *bsoncodec.Registry) error { + // TODO:(GODRIVER-2719): Remove error return value. d.dc.Registry = r return nil } // SetContext replaces the current registry of the decoder with dc. +// +// Deprecated: Use the Decoder configuration methods to set the desired unmarshal behavior instead. func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error { + // TODO:(GODRIVER-2719): Remove error return value. d.dc = dc return nil } -// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
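The new Decoder configuration methods replace the deprecated context plumbing: set the behavior on the Decoder itself, then Decode as usual. A sketch combining the constructor from earlier in the diff with DefaultDocumentM; the sample document is invented:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	data, err := bson.Marshal(bson.D{{Key: "meta", Value: bson.D{}}})
	if err != nil {
		panic(err)
	}

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
	if err != nil {
		panic(err)
	}
	// Replaces NewDecoderWithContext/SetContext: flip the behavior on the
	// Decoder before decoding. Nested documents decoded into interface{}
	// values now come back as primitive.M.
	dec.DefaultDocumentM()

	var out map[string]interface{}
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", out["meta"]) // primitive.M
}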
func (d *Decoder) DefaultDocumentM() { d.defaultDocumentM = true } -// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". func (d *Decoder) DefaultDocumentD() { d.defaultDocumentD = true } + +// AllowTruncatingDoubles causes the Decoder to truncate the fractional part of BSON "double" values +// when attempting to unmarshal them into a Go integer (int, int8, int16, int32, or int64) struct +// field. The truncation logic does not apply to BSON "decimal128" values. +func (d *Decoder) AllowTruncatingDoubles() { + d.dc.Truncate = true +} + +// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or +// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. +func (d *Decoder) BinaryAsSlice() { + d.binaryAsSlice = true +} + +// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +func (d *Decoder) UseJSONStructTags() { + d.useJSONStructTags = true +} + +// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead +// of the UTC timezone. +func (d *Decoder) UseLocalTimeZone() { + d.useLocalTimeZone = true +} + +// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value +// passed to Decode before unmarshaling BSON documents into them. +func (d *Decoder) ZeroMaps() { + d.zeroMaps = true +} + +// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination +// value passed to Decode before unmarshaling BSON documents into them. +func (d *Decoder) ZeroStructs() { + d.zeroStructs = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 0134006d..af609847 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -6,8 +6,9 @@ // Package bson is a library for reading, writing, and manipulating BSON. BSON is a binary serialization format used to // store documents and make remote procedure calls in MongoDB. The BSON specification is located at https://bsonspec.org. -// The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description -// of the codec system and examples of registering custom codecs, see the bsoncodec package. +// The BSON library handles marshaling and unmarshaling of values through a configurable codec system. For a description +// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information +// and usage examples, check out the [Work with BSON] page in the Go Driver docs site. // // # Raw BSON // @@ -37,7 +38,7 @@ // bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} // bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} // -// When decoding BSON to a D or M, the following type mappings apply when unmarshalling: +// When decoding BSON to a D or M, the following type mappings apply when unmarshaling: // // 1. BSON int32 unmarshals to an int32. // 2. BSON int64 unmarshals to an int64. @@ -61,81 +62,78 @@ // 20. BSON DBPointer unmarshals to a primitive.DBPointer. // 21. 
BSON symbol unmarshals to a primitive.Symbol. // -// The above mappings also apply when marshalling a D or M to BSON. Some other useful marshalling mappings are: +// The above mappings also apply when marshaling a D or M to BSON. Some other useful marshaling mappings are: // // 1. time.Time marshals to a BSON datetime. // 2. int8, int16, and int32 marshal to a BSON int32. // 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 // otherwise. -// 4. int64 marshals to BSON int64. +// 4. int64 marshals to BSON int64 (unless [Encoder.IntMinSize] is set). // 5. uint8 and uint16 marshal to a BSON int32. -// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, -// inclusive, and BSON int64 otherwise. -// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or +// 6. uint, uint32, and uint64 marshal to a BSON int64 (unless [Encoder.IntMinSize] is set). +// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshaling a BSON null or // undefined value into a string will yield the empty string.). // // # Structs // -// Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended +// Structs can be marshaled/unmarshaled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended // JSON, the following rules apply: // -// 1. Only exported fields in structs will be marshalled or unmarshalled. +// 1. Only exported fields in structs will be marshaled or unmarshaled. // -// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. +// 2. When marshaling a struct, each field will be lowercased to generate the key for the corresponding BSON element. // For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g. // `bson:"fooField"` to generate key "fooField" instead). // -// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. +// 3. An embedded struct field is marshaled as a subdocument. The key will be the lowercased name of the field's type. // -// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is -// marshalled as a BSON null value. +// 4. A pointer field is marshaled as the underlying type if the pointer is non-nil. If the pointer is nil, it is +// marshaled as a BSON null value. // -// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents -// unmarshalled into an interface{} field will be unmarshalled as a D. +// 5. When unmarshaling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents +// unmarshaled into an interface{} field will be unmarshaled as a D. // // The encoding of each struct field can be customized by the "bson" struct tag. // // This tag behavior is configurable, and different struct tag behavior can be configured by initializing a new -// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. 
By default, JSON tags -// are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: +// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON +// tags are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: // // Example: // // structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) // // The bson tag gives the name of the field, possibly followed by a comma-separated list of options. -// The name may be empty in order to specify options without overriding the default field name. The following options can be used -// to configure behavior: -// -// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to -// the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if -// their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings). -// Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered -// empty if their value is nil. By default, structs are only considered empty if the struct type implements the -// bsoncodec.Zeroer interface and the IsZero method returns true. Struct fields whose types do not implement Zeroer are -// never considered empty and will be marshalled as embedded documents. +// The name may be empty in order to specify options without overriding the default field name. The following options can +// be used to configure behavior: +// +// 1. omitempty: If the omitempty struct tag is specified on a field, the field will be omitted from the marshaling if +// the field has an empty value, defined as false, 0, a nil pointer, a nil interface value, and any empty array, +// slice, map, or string. // NOTE: It is recommended that this tag be used for all slice and map fields. // // 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of -// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other -// types, this tag is ignored. +// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For +// other types, this tag is ignored. // -// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled -// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, -// it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be -// decoded without losing precision. For float64 or non-numeric types, this tag is ignored. +// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles +// unmarshaled into that field will be truncated at the decimal point. For example, if 3.14 is unmarshaled into a +// field of type int, it will be unmarshaled as 3. If this tag is not specified, the decoder will throw an error if +// the value cannot be decoded without losing precision. For float64 or non-numeric types, this tag is ignored. // // 4. 
inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when -// marshalling and "un-flattened" when unmarshalling. This means that all of the fields in that struct/map will be -// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a -// map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be -// {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If there are -// duplicated fields in the resulting document when an inlined struct is marshalled, the inlined field will be overwritten. -// If there are duplicated fields in the resulting document when an inlined map is marshalled, an error will be returned. -// This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be -// marshalled. For fields that are not maps or structs, this tag is ignored. -// -// # Marshalling and Unmarshalling -// -// Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. +// marshaling and "un-flattened" when unmarshaling. This means that all of the fields in that struct/map will be +// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, +// if a map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will +// be {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If +// there are duplicated fields in the resulting document when an inlined struct is marshaled, the inlined field will +// be overwritten. If there are duplicated fields in the resulting document when an inlined map is marshaled, an +// error will be returned. This tag can be used with fields that are pointers to structs. If an inlined pointer field +// is nil, it will not be marshaled. For fields that are not maps or structs, this tag is ignored. +// +// # Marshaling and Unmarshaling +// +// Manually marshaling and unmarshaling can be done with the Marshal and Unmarshal family of functions. +// +// [Work with BSON]: https://www.mongodb.com/docs/drivers/go/current/fundamentals/bson/ package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/encoder.go b/vendor/go.mongodb.org/mongo-driver/bson/encoder.go index fe5125d0..0be2a97f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/encoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/encoder.go @@ -29,10 +29,20 @@ var encPool = sync.Pool{ type Encoder struct { ec bsoncodec.EncodeContext vw bsonrw.ValueWriter + + errorOnInlineDuplicates bool + intMinSize bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool } // NewEncoder returns a new encoder that uses the DefaultRegistry to write to vw. func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) { + // TODO:(GODRIVER-2719): Remove error return value. if vw == nil { return nil, errors.New("cannot create a new Encoder with a nil ValueWriter") } @@ -44,6 +54,9 @@ func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) { } // NewEncoderWithContext returns a new encoder that uses EncodeContext ec to write to vw. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead. 
func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (*Encoder, error) { if ec.Registry == nil { ec = bsoncodec.EncodeContext{Registry: DefaultRegistry} @@ -60,8 +73,7 @@ func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (* // Encode writes the BSON encoding of val to the stream. // -// The documentation for Marshal contains details about the conversion of Go -// values to BSON. +// See [Marshal] for details about BSON marshaling behavior. func (e *Encoder) Encode(val interface{}) error { if marshaler, ok := val.(Marshaler); ok { // TODO(skriptble): Should we have a MarshalAppender interface so that we can have []byte reuse? @@ -76,24 +88,112 @@ func (e *Encoder) Encode(val interface{}) error { if err != nil { return err } + + // Copy the configurations applied to the Encoder over to the EncodeContext, which actually + // communicates those configurations to the default ValueEncoders. + if e.errorOnInlineDuplicates { + e.ec.ErrorOnInlineDuplicates() + } + if e.intMinSize { + e.ec.MinSize = true + } + if e.stringifyMapKeysWithFmt { + e.ec.StringifyMapKeysWithFmt() + } + if e.nilMapAsEmpty { + e.ec.NilMapAsEmpty() + } + if e.nilSliceAsEmpty { + e.ec.NilSliceAsEmpty() + } + if e.nilByteSliceAsEmpty { + e.ec.NilByteSliceAsEmpty() + } + if e.omitZeroStruct { + e.ec.OmitZeroStruct() + } + if e.useJSONStructTags { + e.ec.UseJSONStructTags() + } + return encoder.EncodeValue(e.ec, e.vw, reflect.ValueOf(val)) } -// Reset will reset the state of the encoder, using the same *EncodeContext used in +// Reset will reset the state of the Encoder, using the same *EncodeContext used in // the original construction but using vw. func (e *Encoder) Reset(vw bsonrw.ValueWriter) error { + // TODO:(GODRIVER-2719): Remove error return value. e.vw = vw return nil } -// SetRegistry replaces the current registry of the encoder with r. +// SetRegistry replaces the current registry of the Encoder with r. func (e *Encoder) SetRegistry(r *bsoncodec.Registry) error { + // TODO:(GODRIVER-2719): Remove error return value. e.ec.Registry = r return nil } -// SetContext replaces the current EncodeContext of the encoder with er. +// SetContext replaces the current EncodeContext of the encoder with ec. +// +// Deprecated: Use the Encoder configuration methods to set the desired marshal behavior instead. func (e *Encoder) SetContext(ec bsoncodec.EncodeContext) error { + // TODO:(GODRIVER-2719): Remove error return value. e.ec = ec return nil } + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. +func (e *Encoder) ErrorOnInlineDuplicates() { + e.errorOnInlineDuplicates = true +} + +// IntMinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, uint, +// uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) that can +// represent the integer value. +func (e *Encoder) IntMinSize() { + e.intMinSize = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprint instead of the default string conversion logic. +func (e *Encoder) StringifyMapKeysWithFmt() { + e.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null.
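The Encoder mirrors this with its own configuration methods, copied into the EncodeContext at Encode time as shown above. A hedged sketch of the buffer/value-writer/encoder pattern that the deprecation notices recommend, using IntMinSize:

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}

	// Replaces NewEncoderWithContext/SetContext: configure the Encoder
	// directly. IntMinSize stores an int64 that fits in 32 bits as a
	// BSON int32.
	enc.IntMinSize()

	if err := enc.Encode(bson.D{{Key: "n", Value: int64(7)}}); err != nil {
		panic(err)
	}
	// Byte 4 is the first element's type: 0x10 (int32), not 0x12 (int64).
	fmt.Printf("element type: %#x\n", buf.Bytes()[4])
}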
+func (e *Encoder) NilMapAsEmpty() { + e.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. +func (e *Encoder) NilSliceAsEmpty() { + e.nilSliceAsEmpty = true +} + +// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values +// instead of BSON null. +func (e *Encoder) NilByteSliceAsEmpty() { + e.nilByteSliceAsEmpty = true +} + +// TODO(GODRIVER-2820): Update the description to remove the note about only examining exported +// TODO struct fields once the logic is updated to also inspect private struct fields. + +// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) +// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. +// +// Note that the Encoder only examines exported struct fields when determining if a struct is the +// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. +func (e *Encoder) OmitZeroStruct() { + e.omitZeroStruct = true +} + +// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +func (e *Encoder) UseJSONStructTags() { + e.useJSONStructTags = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go index db8d8ee9..17ce6697 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go @@ -9,6 +9,7 @@ package bson import ( "bytes" "encoding/json" + "sync" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsonrw" @@ -20,17 +21,23 @@ const defaultDstCap = 256 var bvwPool = bsonrw.NewBSONValueWriterPool() var extjPool = bsonrw.NewExtJSONValueWriterPool() -// Marshaler is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. +// Marshaler is the interface implemented by types that can marshal themselves +// into a valid BSON document. +// +// Implementations of Marshaler must return a full BSON document. To create +// custom BSON marshaling behavior for individual values in a BSON document, +// implement the ValueMarshaler interface instead. type Marshaler interface { MarshalBSON() ([]byte, error) } -// ValueMarshaler is an interface implemented by types that can marshal -// themselves into a BSON value as bytes. The type must be the valid type for -// the bytes returned. The bytes and byte type together must be valid if the -// error is nil. +// ValueMarshaler is the interface implemented by types that can marshal +// themselves into a valid BSON value. The format of the returned bytes must +// match the returned type. +// +// Implementations of ValueMarshaler must return an individual BSON value. To +// create custom BSON marshaling behavior for an entire BSON document, implement +// the Marshaler interface instead. type ValueMarshaler interface { MarshalBSONValue() (bsontype.Type, []byte, error) } @@ -48,12 +55,42 @@ func Marshal(val interface{}) ([]byte, error) { // MarshalAppend will encode val as a BSON document and append the bytes to dst. If dst is not large enough to hold the // bytes, it will be grown. If val is not a type that can be transformed into a document, MarshalValueAppend should be // used instead. 
+// +// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter]: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalAppend(dst []byte, val interface{}) ([]byte, error) { return MarshalAppendWithRegistry(DefaultRegistry, dst, val) } // MarshalWithRegistry returns the BSON encoding of val as a BSON document. If val is not a type that can be transformed // into a document, MarshalValueWithRegistry should be used instead. +// +// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) { dst := make([]byte, 0) return MarshalAppendWithRegistry(r, dst, val) @@ -61,6 +98,22 @@ func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) // MarshalWithContext returns the BSON encoding of val as a BSON document using EncodeContext ec. If val is not a type // that can be transformed into a document, MarshalValueWithContext should be used instead. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, error) { dst := make([]byte, 0) return MarshalAppendWithContext(ec, dst, val) @@ -69,16 +122,74 @@ func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, er // MarshalAppendWithRegistry will encode val as a BSON document using Registry r and append the bytes to dst. If dst is // not large enough to hold the bytes, it will be grown. If val is not a type that can be transformed into a document, // MarshalValueAppendWithRegistry should be used instead. +// +// Deprecated: Use [NewEncoder], and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) ([]byte, error) { return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val) } +// Pool of buffers for marshalling BSON. +var bufPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + // MarshalAppendWithContext will encode val as a BSON document using Registry r and EncodeContext ec and append the // bytes to dst. If dst is not large enough to hold the bytes, it will be grown. 
If val is not a type that can be // transformed into a document, MarshalValueAppendWithContext should be used instead. +// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter], and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) { - sw := new(bsonrw.SliceWriter) - *sw = dst + sw := bufPool.Get().(*bytes.Buffer) + defer func() { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum + // buffer to place back in the pool. We limit the size to 16MiB because + // that's the maximum wire message size supported by any current MongoDB + // server. + // + // Comment based on + // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/fmt/print.go;l=147 + // + // Recycle byte slices that are smaller than 16MiB and at least half + // occupied. + if sw.Cap() < 16*1024*1024 && sw.Cap()/2 < sw.Len() { + bufPool.Put(sw) + } + }() + + sw.Reset() vw := bvwPool.Get(sw) defer bvwPool.Put(vw) @@ -99,7 +210,7 @@ func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interf return nil, err } - return *sw, nil + return append(dst, sw.Bytes()...), nil } // MarshalValue returns the BSON encoding of val. @@ -112,17 +223,26 @@ func MarshalValue(val interface{}) (bsontype.Type, []byte, error) { // MarshalValueAppend will append the BSON encoding of val to dst. If dst is not large enough to hold the BSON encoding // of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func MarshalValueAppend(dst []byte, val interface{}) (bsontype.Type, []byte, error) { return MarshalValueAppendWithRegistry(DefaultRegistry, dst, val) } // MarshalValueWithRegistry returns the BSON encoding of val using Registry r. +// +// Deprecated: Using a custom registry to marshal individual BSON values will not be supported in Go +// Driver 2.0. func MarshalValueWithRegistry(r *bsoncodec.Registry, val interface{}) (bsontype.Type, []byte, error) { dst := make([]byte, 0) return MarshalValueAppendWithRegistry(r, dst, val) } // MarshalValueWithContext returns the BSON encoding of val using EncodeContext ec. +// +// Deprecated: Using a custom EncodeContext to marshal individual BSON elements will not be +// supported in Go Driver 2.0. func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsontype.Type, []byte, error) { dst := make([]byte, 0) return MarshalValueAppendWithContext(ec, dst, val) @@ -130,12 +250,18 @@ func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsont // MarshalValueAppendWithRegistry will append the BSON encoding of val to dst using Registry r. If dst is not large // enough to hold the BSON encoding of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. 
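The reworked Marshaler and ValueMarshaler docs earlier in marshal.go draw a whole-document versus single-value line. A sketch of the value side; the Celsius type is invented for illustration:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

// Celsius marshals itself as a single BSON value (a double), so it
// implements ValueMarshaler rather than Marshaler, which must return a
// complete document.
type Celsius float64

func (c Celsius) MarshalBSONValue() (bsontype.Type, []byte, error) {
	return bson.MarshalValue(float64(c))
}

func main() {
	data, err := bson.Marshal(bson.D{{Key: "temp", Value: Celsius(21.5)}})
	if err != nil {
		panic(err)
	}
	var out bson.M
	if err := bson.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["temp"]) // 21.5
}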
func MarshalValueAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) (bsontype.Type, []byte, error) { return MarshalValueAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val) } // MarshalValueAppendWithContext will append the BSON encoding of val to dst using EncodeContext ec. If dst is not large // enough to hold the BSON encoding of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func MarshalValueAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) (bsontype.Type, []byte, error) { // get a ValueWriter configured to write to dst sw := new(bsonrw.SliceWriter) @@ -173,17 +299,63 @@ func MarshalExtJSON(val interface{}, canonical, escapeHTML bool) ([]byte, error) // MarshalExtJSONAppend will append the extended JSON encoding of val to dst. // If dst is not large enough to hold the extended JSON encoding of val, dst // will be grown. +// +// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter] instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalExtJSONAppend(dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { return MarshalExtJSONAppendWithRegistry(DefaultRegistry, dst, val, canonical, escapeHTML) } // MarshalExtJSONWithRegistry returns the extended JSON encoding of val using Registry r. +// +// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. func MarshalExtJSONWithRegistry(r *bsoncodec.Registry, val interface{}, canonical, escapeHTML bool) ([]byte, error) { dst := make([]byte, 0, defaultDstCap) return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML) } // MarshalExtJSONWithContext returns the extended JSON encoding of val using Registry r. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, canonical, escapeHTML bool) ([]byte, error) { dst := make([]byte, 0, defaultDstCap) return MarshalExtJSONAppendWithContext(ec, dst, val, canonical, escapeHTML) @@ -192,6 +364,22 @@ func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, cano // MarshalExtJSONAppendWithRegistry will append the extended JSON encoding of // val to dst using Registry r. If dst is not large enough to hold the BSON // encoding of val, dst will be grown. 
+// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry] +// instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML) } @@ -199,6 +387,23 @@ func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val int // MarshalExtJSONAppendWithContext will append the extended JSON encoding of // val to dst using Registry r. If dst is not large enough to hold the BSON // encoding of val, dst will be grown. +// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter], and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { sw := new(bsonrw.SliceWriter) *sw = dst diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go index ba7c9112..08c39514 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go @@ -164,9 +164,6 @@ func (d Decimal128) BigInt() (*big.Int, int, error) { // Would be handled by the logic below, but that's trivial and common. if high == 0 && low == 0 && exp == 0 { - if posSign { - return new(big.Int), 0, nil - } return new(big.Int), 0, nil } @@ -328,6 +325,7 @@ func ParseDecimal128(s string) (Decimal128, error) { return dErr(s) } + // Parse the significand (i.e. the non-exponent part) as a big.Int. bi, ok := new(big.Int).SetString(intPart+decPart, 10) if !ok { return dErr(s) @@ -360,6 +358,19 @@ func ParseDecimal128FromBigInt(bi *big.Int, exp int) (Decimal128, bool) { q := new(big.Int) r := new(big.Int) + // If the significand is zero, the logical value will always be zero, independent of the + // exponent. However, the loops for handling out-of-range exponent values below may be extremely + // slow for zero values because the significand never changes. Limit the exponent value to the + // supported range here to prevent entering the loops below. 
+ if bi.Cmp(zero) == 0 { + if exp > MaxDecimal128Exp { + exp = MaxDecimal128Exp + } + if exp < MinDecimal128Exp { + exp = MinDecimal128Exp + } + } + for bigIntCmpAbs(bi, maxS) == 1 { bi, _ = q.QuoRem(bi, ten, r) if r.Cmp(zero) != 0 { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index ded36731..c130e3ff 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -82,18 +82,18 @@ func ObjectIDFromHex(s string) (ObjectID, error) { return NilObjectID, ErrInvalidHex } - b, err := hex.DecodeString(s) + var oid [12]byte + _, err := hex.Decode(oid[:], []byte(s)) if err != nil { return NilObjectID, err } - var oid [12]byte - copy(oid[:], b) - return oid, nil } // IsValidObjectID returns true if the provided hex string represents a valid ObjectID and false if not. +// +// Deprecated: Use ObjectIDFromHex and check the error instead. func IsValidObjectID(s string) bool { _, err := ObjectIDFromHex(s) return err == nil @@ -183,7 +183,7 @@ func processUniqueBytes() [5]byte { var b [5]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { - panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err)) + panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err)) } return b @@ -193,7 +193,7 @@ func readRandomUint32() uint32 { var b [4]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { - panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err)) + panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err)) } return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index c72ccc1c..65f4fbb9 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -45,7 +45,7 @@ var _ json.Unmarshaler = (*DateTime)(nil) // MarshalJSON marshal to time type. func (d DateTime) MarshalJSON() ([]byte, error) { - return json.Marshal(d.Time()) + return json.Marshal(d.Time().UTC()) } // UnmarshalJSON creates a primitive.DateTime from a JSON string. @@ -141,6 +141,16 @@ type Timestamp struct { I uint32 } +// After reports whether the time instant tp is after tp2. +func (tp Timestamp) After(tp2 Timestamp) bool { + return tp.T > tp2.T || (tp.T == tp2.T && tp.I > tp2.I) +} + +// Before reports whether the time instant tp is before tp2. +func (tp Timestamp) Before(tp2 Timestamp) bool { + return tp.T < tp2.T || (tp.T == tp2.T && tp.I < tp2.I) +} + // Equal compares tp to tp2 and returns true if they are equal. func (tp Timestamp) Equal(tp2 Timestamp) bool { return tp.T == tp2.T && tp.I == tp2.I @@ -151,24 +161,25 @@ func (tp Timestamp) IsZero() bool { return tp.T == 0 && tp.I == 0 } -// CompareTimestamp returns an integer comparing two Timestamps, where T is compared first, followed by I. -// Returns 0 if tp = tp2, 1 if tp > tp2, -1 if tp < tp2. -func CompareTimestamp(tp, tp2 Timestamp) int { - if tp.Equal(tp2) { +// Compare compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after +// tp2, it returns +1; if they're the same, it returns 0. 
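Per the IsValidObjectID deprecation above, validation folds into the parse call itself. A minimal sketch; the hex literal is arbitrary:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	// Parse and validate in one step instead of calling the deprecated
	// IsValidObjectID first; a bad length or non-hex digit returns an error.
	oid, err := primitive.ObjectIDFromHex("5f2a6c39b4c1a2d3e4f5a6b7")
	if err != nil {
		fmt.Println("invalid ObjectID:", err)
		return
	}
	fmt.Println(oid.Hex())
}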
+func (tp Timestamp) Compare(tp2 Timestamp) int { + switch { + case tp.Equal(tp2): return 0 - } - - if tp.T > tp2.T { - return 1 - } - if tp.T < tp2.T { + case tp.Before(tp2): return -1 + default: + return +1 } - // Compare I values because T values are equal - if tp.I > tp2.I { - return 1 - } - return -1 +} + +// CompareTimestamp compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after +// tp2, it returns +1; if they're the same, it returns 0. +// +// Deprecated: Use Timestamp.Compare instead. +func CompareTimestamp(tp, tp2 Timestamp) int { + return tp.Compare(tp2) } // MinKey represents the BSON minkey value. @@ -186,6 +197,9 @@ type MaxKey struct{} type D []E // Map creates a map from the elements of the D. +// +// Deprecated: Converting directly from a D to an M will not be supported in Go Driver 2.0. Instead, +// users should marshal the D to BSON using bson.Marshal and unmarshal it to M using bson.Unmarshal. func (d D) Map() M { m := make(M, len(d)) for _, e := range d { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go index 1cbe3884..ff32a87a 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go @@ -8,6 +8,7 @@ package bson import ( "errors" + "fmt" "reflect" "go.mongodb.org/mongo-driver/bson/bsoncodec" @@ -21,10 +22,16 @@ var primitiveCodecs PrimitiveCodecs // PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types // defined in this package. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. type PrimitiveCodecs struct{} // RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs // with the provided RegistryBuilder. if rb is nil, a new empty RegistryBuilder will be created. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil")) @@ -38,18 +45,35 @@ func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) } // RawValueEncodeValue is the ValueEncoderFunc for RawValue. -func (PrimitiveCodecs) RawValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// If the RawValue's Type is "invalid" and the RawValue's Value is not empty or +// nil, then this method will return an error. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive +// encoders and decoders registered. +func (PrimitiveCodecs) RawValueEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRawValue { - return bsoncodec.ValueEncoderError{Name: "RawValueEncodeValue", Types: []reflect.Type{tRawValue}, Received: val} + return bsoncodec.ValueEncoderError{ + Name: "RawValueEncodeValue", + Types: []reflect.Type{tRawValue}, + Received: val, + } } rawvalue := val.Interface().(RawValue) + if !rawvalue.Type.IsValid() { + return fmt.Errorf("the RawValue Type specifies an invalid BSON type: %#x", byte(rawvalue.Type)) + } + return bsonrw.Copier{}.CopyValueFromBytes(vw, rawvalue.Type, rawvalue.Value) } // RawValueDecodeValue is the ValueDecoderFunc for RawValue. 
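
A hedged sketch of the migration the deprecation notes above point at: bson.NewRegistry (introduced later in this diff) replaces the NewRegistryBuilder/RegisterPrimitiveCodecs combination, and the registry is attached via Decoder.SetRegistry. The document contents are illustrative.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	data, err := bson.Marshal(bson.D{{Key: "ok", Value: true}})
	if err != nil {
		panic(err)
	}

	// bson.NewRegistry already has every default and primitive codec
	// registered, so no RegistryBuilder assembly is needed.
	reg := bson.NewRegistry()

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
	if err != nil {
		panic(err)
	}
	dec.SetRegistry(reg)

	var m bson.M
	if err := dec.Decode(&m); err != nil {
		panic(err)
	}
	fmt.Println(m["ok"]) // true
}
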
-func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawValueDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRawValue { return bsoncodec.ValueDecoderError{Name: "RawValueDecodeValue", Types: []reflect.Type{tRawValue}, Received: val} } @@ -64,7 +88,10 @@ func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw } // RawEncodeValue is the ValueEncoderFunc for Reader. -func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRaw { return bsoncodec.ValueEncoderError{Name: "RawEncodeValue", Types: []reflect.Type{tRaw}, Received: val} } @@ -75,7 +102,10 @@ func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.Valu } // RawDecodeValue is the ValueDecoderFunc for Reader. -func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRaw { return bsoncodec.ValueDecoderError{Name: "RawDecodeValue", Types: []reflect.Type{tRaw}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw.go b/vendor/go.mongodb.org/mongo-driver/bson/raw.go index efd705da..130da61b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw.go @@ -16,18 +16,27 @@ import ( // ErrNilReader indicates that an operation was attempted on a nil bson.Reader. var ErrNilReader = errors.New("nil reader") -// Raw is a wrapper around a byte slice. It will interpret the slice as a -// BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the -// methods on this type and associated types come from the bsoncore package. +// Raw is a raw encoded BSON document. It can be used to delay BSON document decoding or precompute +// a BSON encoded document. +// +// A Raw must be a full BSON document. Use the RawValue type for individual BSON values. type Raw []byte -// NewFromIOReader reads in a document from the given io.Reader and constructs a Raw from -// it. -func NewFromIOReader(r io.Reader) (Raw, error) { +// ReadDocument reads a BSON document from the io.Reader and returns it as a bson.Raw. If the +// reader contains multiple BSON documents, only the first document is read. +func ReadDocument(r io.Reader) (Raw, error) { doc, err := bsoncore.NewDocumentFromReader(r) return Raw(doc), err } +// NewFromIOReader reads a BSON document from the io.Reader and returns it as a bson.Raw. If the +// reader contains multiple BSON documents, only the first document is read. +// +// Deprecated: Use ReadDocument instead. +func NewFromIOReader(r io.Reader) (Raw, error) { + return ReadDocument(r) +} + // Validate validates the document. 
This method only validates the first document in // the slice, to validate other documents, the slice must be resliced. func (r Raw) Validate() (err error) { return bsoncore.Document(r).Validate() } @@ -51,12 +60,19 @@ func (r Raw) LookupErr(key ...string) (RawValue, error) { // elements. If the document is not valid, the elements up to the invalid point will be returned // along with an error. func (r Raw) Elements() ([]RawElement, error) { - elems, err := bsoncore.Document(r).Elements() + doc := bsoncore.Document(r) + if len(doc) == 0 { + return nil, nil + } + elems, err := doc.Elements() + if err != nil { + return nil, err + } relems := make([]RawElement, 0, len(elems)) for _, elem := range elems { relems = append(relems, RawElement(elem)) } - return relems, err + return relems, nil } // Values returns this document as a slice of values. The returned slice will contain valid values. @@ -81,5 +97,5 @@ func (r Raw) IndexErr(index uint) (RawElement, error) { return RawElement(elem), err } -// String implements the fmt.Stringer interface. +// String returns the BSON document encoded as Extended JSON. func (r Raw) String() string { return bsoncore.Document(r).String() } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go index 006f503a..8ce13c2c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go @@ -10,10 +10,7 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) -// RawElement represents a BSON element in byte form. This type provides a simple way to -// transform a slice of bytes into a BSON element and extract information from it. -// -// RawElement is a thin wrapper around a bsoncore.Element. +// RawElement is a raw encoded BSON document or array element. type RawElement []byte // Key returns the key for this element. If the element is not valid, this method returns an empty @@ -36,7 +33,7 @@ func (re RawElement) ValueErr() (RawValue, error) { // Validate ensures re is a valid BSON element. func (re RawElement) Validate() error { return bsoncore.Element(re).Validate() } -// String implements the fmt.Stringer interface. The output will be in extended JSON format. +// String returns the BSON element encoded as Extended JSON. func (re RawElement) String() string { doc := bsoncore.BuildDocument(nil, re) j, err := MarshalExtJSON(Raw(doc), true, false) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go index 75297f30..4d1bfb31 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go @@ -26,11 +26,10 @@ var ErrNilContext = errors.New("DecodeContext cannot be nil") // ErrNilRegistry is returned when the provided registry is nil. var ErrNilRegistry = errors.New("Registry cannot be nil") -// RawValue represents a BSON value in byte form. It can be used to hold unprocessed BSON or to -// defer processing of BSON. Type is the BSON type of the value and Value are the raw bytes that -// represent the element. +// RawValue is a raw encoded BSON value. It can be used to delay BSON value decoding or precompute +// BSON encoded value. Type is the BSON type of the value and Value is the raw encoded BSON value. // -// This type wraps bsoncore.Value for most of it's functionality. +// A RawValue must be an individual BSON value. Use the Raw type for full BSON documents. 
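
Before the type definition below, a hedged sketch of the Raw/RawValue split just described: ReadDocument produces a bson.Raw for a full document, and Lookup yields a bson.RawValue that is decoded only on demand. The field name and value are illustrative.

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	encoded, err := bson.Marshal(bson.D{{Key: "name", Value: "podman"}})
	if err != nil {
		panic(err)
	}

	// Full document: bson.Raw. Decoding is deferred until a field is needed.
	raw, err := bson.ReadDocument(bytes.NewReader(encoded))
	if err != nil {
		panic(err)
	}

	// Individual value: bson.RawValue.
	var name string
	if err := raw.Lookup("name").Unmarshal(&name); err != nil {
		panic(err)
	}
	fmt.Println(name) // podman
}
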
type RawValue struct { Type bsontype.Type Value []byte @@ -38,6 +37,12 @@ type RawValue struct { r *bsoncodec.Registry } +// IsZero reports whether the RawValue is zero, i.e. no data is present on +// the RawValue. It returns true if Type is 0 and Value is empty or nil. +func (rv RawValue) IsZero() bool { + return rv.Type == 0x00 && len(rv.Value) == 0 +} + // Unmarshal deserializes BSON into the provided val. If RawValue cannot be unmarshaled into val, an // error is returned. This method will use the registry used to create the RawValue, if the RawValue // was created from partial BSON processing, or it will use the default registry. Users wishing to @@ -268,10 +273,16 @@ func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32 // AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method // will panic. +// +// Deprecated: Use AsInt64 instead. If an int32 is required, convert the returned value to an int32 +// and perform any required overflow/underflow checking. func (rv RawValue) AsInt32() int32 { return convertToCoreValue(rv).AsInt32() } // AsInt32OK is the same as AsInt32, except that it returns a boolean instead of // panicking. +// +// Deprecated: Use AsInt64OK instead. If an int32 is required, convert the returned value to an +// int32 and perform any required overflow/underflow checking. func (rv RawValue) AsInt32OK() (int32, bool) { return convertToCoreValue(rv).AsInt32OK() } // Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index 16d7573e..b5b0f356 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -6,15 +6,19 @@ package bson -import "go.mongodb.org/mongo-driver/bson/bsoncodec" +import ( + "go.mongodb.org/mongo-driver/bson/bsoncodec" +) // DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the // primitive codecs. -var DefaultRegistry = NewRegistryBuilder().Build() +var DefaultRegistry = NewRegistry() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and // decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. +// +// Deprecated: Use NewRegistry instead. func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb) @@ -22,3 +26,10 @@ func NewRegistryBuilder() *bsoncodec.RegistryBuilder { primitiveCodecs.RegisterPrimitiveCodecs(rb) return rb } + +// NewRegistry creates a new Registry configured with the default encoders and decoders from the +// bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the PrimitiveCodecs +// type in this package. +func NewRegistry() *bsoncodec.Registry { + return NewRegistryBuilder().Build() +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/types.go b/vendor/go.mongodb.org/mongo-driver/bson/types.go index 13a1c35c..ef398124 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/types.go @@ -10,7 +10,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// These constants uniquely refer to each BSON type. +// BSON element types as described in https://bsonspec.org/spec.html. 
const (
	TypeDouble = bsontype.Double
	TypeString = bsontype.String
@@ -34,3 +34,17 @@ const (
 	TypeMinKey = bsontype.MinKey
 	TypeMaxKey = bsontype.MaxKey
 )
+
+// BSON binary element subtypes as described in https://bsonspec.org/spec.html.
+const (
+	TypeBinaryGeneric     = bsontype.BinaryGeneric
+	TypeBinaryFunction    = bsontype.BinaryFunction
+	TypeBinaryBinaryOld   = bsontype.BinaryBinaryOld
+	TypeBinaryUUIDOld     = bsontype.BinaryUUIDOld
+	TypeBinaryUUID        = bsontype.BinaryUUID
+	TypeBinaryMD5         = bsontype.BinaryMD5
+	TypeBinaryEncrypted   = bsontype.BinaryEncrypted
+	TypeBinaryColumn      = bsontype.BinaryColumn
+	TypeBinarySensitive   = bsontype.BinarySensitive
+	TypeBinaryUserDefined = bsontype.BinaryUserDefined
+)
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
index f936ba18..66da17ee 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
@@ -14,18 +14,26 @@ import (
 	"go.mongodb.org/mongo-driver/bson/bsontype"
 )
 
-// Unmarshaler is an interface implemented by types that can unmarshal a BSON
-// document representation of themselves. The BSON bytes can be assumed to be
-// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
-// after returning.
+// Unmarshaler is the interface implemented by types that can unmarshal a BSON
+// document representation of themselves. The input can be assumed to be a valid
+// encoding of a BSON document. UnmarshalBSON must copy the BSON data if it
+// wishes to retain the data after returning.
+//
+// Unmarshaler is only used to unmarshal full BSON documents. To create custom
+// BSON unmarshaling behavior for individual values in a BSON document,
+// implement the ValueUnmarshaler interface instead.
 type Unmarshaler interface {
 	UnmarshalBSON([]byte) error
 }
 
-// ValueUnmarshaler is an interface implemented by types that can unmarshal a
-// BSON value representation of themselves. The BSON bytes and type can be
-// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
-// wishes to retain the data after returning.
+// ValueUnmarshaler is the interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The input can be assumed to be a
+// valid encoding of a BSON value. UnmarshalBSONValue must copy the BSON value
+// bytes if it wishes to retain the data after returning.
+//
+// ValueUnmarshaler is only used to unmarshal individual values in a BSON
+// document. To create custom BSON unmarshaling behavior for an entire BSON
+// document, implement the Unmarshaler interface instead.
 type ValueUnmarshaler interface {
 	UnmarshalBSONValue(bsontype.Type, []byte) error
 }
@@ -40,6 +48,16 @@ func Unmarshal(data []byte, val interface{}) error {
 // UnmarshalWithRegistry parses the BSON-encoded data using Registry r and
 // stores the result in the value pointed to by val. If val is nil or not
 // a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError.
+//
+// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead:
+//
+//	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
+//	if err != nil {
+//		panic(err)
+//	}
+//	dec.SetRegistry(reg)
+//
+// See [Decoder] for more examples.
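
Before the registry-based helpers below, a hedged sketch of the Unmarshaler/ValueUnmarshaler split documented above: implementing UnmarshalBSONValue customizes decoding of a single value inside a document. The celsius type, the field name, and the rounding rule are illustrative, and the sketch leans on the UnmarshalValue helper introduced later in this file.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

// celsius decodes from a BSON double, rounding positive values to the
// nearest whole degree (illustrative only).
type celsius int

func (c *celsius) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
	var f float64
	if err := bson.UnmarshalValue(t, data, &f); err != nil {
		return err
	}
	*c = celsius(f + 0.5)
	return nil
}

func main() {
	doc, err := bson.Marshal(bson.D{{Key: "temp", Value: 21.7}})
	if err != nil {
		panic(err)
	}
	var out struct {
		Temp celsius `bson:"temp"`
	}
	if err := bson.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Temp) // 22
}
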
func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) error { vr := bsonrw.NewBSONDocumentReader(data) return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val) @@ -48,11 +66,40 @@ func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) // UnmarshalWithContext parses the BSON-encoded data using DecodeContext dc and // stores the result in the value pointed to by val. If val is nil or not // a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. +// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal +// behavior instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.DefaultDocumentM() +// +// See [Decoder] for more examples. func UnmarshalWithContext(dc bsoncodec.DecodeContext, data []byte, val interface{}) error { vr := bsonrw.NewBSONDocumentReader(data) return unmarshalFromReader(dc, vr, val) } +// UnmarshalValue parses the BSON value of type t with bson.DefaultRegistry and +// stores the result in the value pointed to by val. If val is nil or not a pointer, +// UnmarshalValue returns an error. +func UnmarshalValue(t bsontype.Type, data []byte, val interface{}) error { + return UnmarshalValueWithRegistry(DefaultRegistry, t, data, val) +} + +// UnmarshalValueWithRegistry parses the BSON value of type t with registry r and +// stores the result in the value pointed to by val. If val is nil or not a pointer, +// UnmarshalValue returns an error. +// +// Deprecated: Using a custom registry to unmarshal individual BSON values will not be supported in +// Go Driver 2.0. +func UnmarshalValueWithRegistry(r *bsoncodec.Registry, t bsontype.Type, data []byte, val interface{}) error { + vr := bsonrw.NewBSONValueReader(t, data) + return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val) +} + // UnmarshalExtJSON parses the extended JSON-encoded data and stores the result // in the value pointed to by val. If val is nil or not a pointer, Unmarshal // returns InvalidUnmarshalError. @@ -63,6 +110,20 @@ func UnmarshalExtJSON(data []byte, canonical bool, val interface{}) error { // UnmarshalExtJSONWithRegistry parses the extended JSON-encoded data using // Registry r and stores the result in the value pointed to by val. If val is // nil or not a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. +// +// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead: +// +// vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true) +// if err != nil { +// panic(err) +// } +// dec, err := bson.NewDecoder(vr) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Decoder] for more examples. func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical bool, val interface{}) error { ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical) if err != nil { @@ -75,6 +136,21 @@ func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical // UnmarshalExtJSONWithContext parses the extended JSON-encoded data using // DecodeContext dc and stores the result in the value pointed to by val. If val is // nil or not a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. 
+//
+// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal
+// behavior instead:
+//
+//	vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true)
+//	if err != nil {
+//		panic(err)
+//	}
+//	dec, err := bson.NewDecoder(vr)
+//	if err != nil {
+//		panic(err)
+//	}
+//	dec.DefaultDocumentM()
+//
+// See [Decoder] for more examples.
 func UnmarshalExtJSONWithContext(dc bsoncodec.DecodeContext, data []byte, canonical bool, val interface{}) error {
 	ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
 	if err != nil {
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go
index 8ea60ba3..6bc0afa7 100644
--- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go
@@ -7,10 +7,10 @@
 package bsoncore
 
 import (
-	"bytes"
 	"fmt"
 	"io"
 	"strconv"
+	"strings"
 )
 
 // NewArrayLengthError creates and returns an error for when the length of an array exceeds the
@@ -53,7 +53,7 @@ func (a Array) DebugString() string {
 	if len(a) < 5 {
 		return "<malformed>"
 	}
-	var buf bytes.Buffer
+	var buf strings.Builder
 	buf.WriteString("Array")
 	length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length
 	buf.WriteByte('(')
@@ -69,7 +69,7 @@
 			buf.WriteString(fmt.Sprintf("<malformed (%d)>", length))
 			break
 		}
-		fmt.Fprintf(&buf, "%s", elem.Value().DebugString())
+		buf.WriteString(elem.Value().DebugString())
 		if length != 1 {
 			buf.WriteByte(',')
 		}
@@ -85,7 +85,7 @@ func (a Array) String() string {
 	if len(a) < 5 {
 		return ""
 	}
-	var buf bytes.Buffer
+	var buf strings.Builder
 	buf.WriteByte('[')
 
 	length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length
@@ -100,7 +100,7 @@
 		if !ok {
 			return ""
 		}
-		fmt.Fprintf(&buf, "%s", elem.Value().String())
+		buf.WriteString(elem.Value().String())
 		if length > 1 {
 			buf.WriteByte(',')
 		}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go
index 17aad6d7..88133293 100644
--- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go
@@ -4,25 +4,6 @@
 // not use this file except in compliance with the License. You may obtain
 // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
 
-// Package bsoncore contains functions that can be used to encode and decode BSON
-// elements and values to or from a slice of bytes. These functions are aimed at
-// allowing low level manipulation of BSON and can be used to build a higher
-// level BSON library.
-//
-// The Read* functions within this package return the values of the element and
-// a boolean indicating if the values are valid. A boolean was used instead of
-// an error because any error that would be returned would be the same: not
-// enough bytes. This library attempts to do no validation, it will only return
-// false if there are not enough bytes for an item to be read. For example, the
-// ReadDocument function checks the length, if that length is larger than the
-// number of bytes available, it will return false, if there are enough bytes, it
-// will return those bytes and true. It is the consumers responsibility to
-// validate those bytes.
-//
-// The Append* functions within this package will append the type value to the
-// given dst slice. If the slice has enough capacity, it will not grow the
-// slice. The Append*Element functions within this package operate in the same
-// way, but additionally append the BSON type and the key before the value.
 package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
 
 import (
@@ -254,7 +235,7 @@ func BuildDocumentValue(elems ...[]byte) Value {
 	return Value{Type: bsontype.EmbeddedDocument, Data: BuildDocument(nil, elems...)}
 }
 
-// BuildDocumentElement will append a BSON embedded document elemnt using key and the provided
+// BuildDocumentElement will append a BSON embedded document element using key and the provided
 // elements and return the extended buffer.
 func BuildDocumentElement(dst []byte, key string, elems ...[]byte) []byte {
 	return BuildDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), elems...)
@@ -844,6 +825,9 @@ func readLengthBytes(src []byte) ([]byte, []byte, bool) {
 	if !ok {
 		return nil, src, false
 	}
+	if l < 4 {
+		return nil, src, false
+	}
 	if len(src) < int(l) {
 		return nil, src, false
 	}
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go
new file mode 100644
index 00000000..6837b53f
--- /dev/null
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go
@@ -0,0 +1,29 @@
+// Copyright (C) MongoDB, Inc. 2022-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsoncore contains functions that can be used to encode and decode BSON
+// elements and values to or from a slice of bytes. These functions are aimed at
+// allowing low level manipulation of BSON and can be used to build a higher
+// level BSON library.
+//
+// The Read* functions within this package return the values of the element and
+// a boolean indicating if the values are valid. A boolean was used instead of
+// an error because any error that would be returned would be the same: not
+// enough bytes. This library attempts to do no validation; it will only return
+// false if there are not enough bytes for an item to be read. For example, the
+// ReadDocument function checks the length; if that length is larger than the
+// number of bytes available, it will return false, and if there are enough bytes, it
+// will return those bytes and true. It is the consumer's responsibility to
+// validate those bytes.
+//
+// The Append* functions within this package will append the type value to the
+// given dst slice. If the slice has enough capacity, it will not grow the
+// slice. The Append*Element functions within this package operate in the same
+// way, but additionally append the BSON type and the key before the value.
+//
+// Warning: Package bsoncore is unstable and there is no backward compatibility
+// guarantee. It is experimental and subject to change.
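
A hedged sketch of the Append*/Read* conventions restated in the package comment above: build a document with the Append*Element helpers, then read it back, checking the ok boolean rather than an error. The keys and values are illustrative.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	// Append* grows dst only when its capacity runs out.
	idx, doc := bsoncore.AppendDocumentStart(nil)
	doc = bsoncore.AppendStringElement(doc, "name", "podman")
	doc = bsoncore.AppendInt32Element(doc, "count", 3)
	doc, err := bsoncore.AppendDocumentEnd(doc, idx)
	if err != nil {
		panic(err)
	}

	// Read side: booleans signal "not enough bytes", not full validation.
	val, err := bsoncore.Document(doc).LookupErr("count")
	if err != nil {
		panic(err)
	}
	n, ok := val.Int32OK()
	fmt.Println(n, ok) // 3 true
}
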
+package bsoncore
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go
index d6e4bb06..3f360f1a 100644
--- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go
@@ -7,11 +7,11 @@
 package bsoncore
 
 import (
-	"bytes"
 	"errors"
 	"fmt"
 	"io"
 	"strconv"
+	"strings"
 
 	"go.mongodb.org/mongo-driver/bson/bsontype"
 )
@@ -237,7 +237,7 @@ func (d Document) DebugString() string {
 	if len(d) < 5 {
 		return "<malformed>"
 	}
-	var buf bytes.Buffer
+	var buf strings.Builder
 	buf.WriteString("Document")
 	length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
 	buf.WriteByte('(')
@@ -253,7 +253,7 @@
 			buf.WriteString(fmt.Sprintf("<malformed (%d)>", length))
 			break
 		}
-		fmt.Fprintf(&buf, "%s ", elem.DebugString())
+		buf.WriteString(elem.DebugString())
 	}
 	buf.WriteByte('}')
 
@@ -266,7 +266,7 @@ func (d Document) String() string {
 	if len(d) < 5 {
 		return ""
 	}
-	var buf bytes.Buffer
+	var buf strings.Builder
 	buf.WriteByte('{')
 
 	length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
@@ -285,7 +285,7 @@
 		if !ok {
 			return ""
 		}
-		fmt.Fprintf(&buf, "%s", elem.String())
+		buf.WriteString(elem.String())
 		first = false
 	}
 	buf.WriteByte('}')
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go
index 3acb4222..1fe0897c 100644
--- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go
@@ -129,7 +129,7 @@ func (e Element) String() string {
 	if !valid {
 		return ""
 	}
-	return fmt.Sprintf(`"%s": %v`, key, val)
+	return "\"" + string(key) + "\": " + val.String()
 }
 
 // DebugString outputs a human readable version of RawElement. It will attempt to stringify the
diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go
index 789d2b98..69c1f9ed 100644
--- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go
+++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go
@@ -59,8 +59,6 @@ func (v Value) IsNumber() bool {
 
 // AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method
 // will panic.
-//
-// TODO(skriptble): Add support for Decimal128.
 func (v Value) AsInt32() int32 {
 	if !v.IsNumber() {
 		panic(ElementTypeError{"bsoncore.Value.AsInt32", v.Type})
@@ -93,8 +91,6 @@ func (v Value) AsInt32() int32 {
 
 // AsInt32OK functions the same as AsInt32 but returns a boolean instead of panicking. False
 // indicates an error.
-//
-// TODO(skriptble): Add support for Decimal128.
 func (v Value) AsInt32OK() (int32, bool) {
 	if !v.IsNumber() {
 		return 0, false
@@ -127,8 +123,6 @@ func (v Value) AsInt32OK() (int32, bool) {
 
 // AsInt64 returns a BSON number as an int64. If the BSON type is not a numeric one, this method
 // will panic.
-//
-// TODO(skriptble): Add support for Decimal128.
 func (v Value) AsInt64() int64 {
 	if !v.IsNumber() {
 		panic(ElementTypeError{"bsoncore.Value.AsInt64", v.Type})
@@ -162,8 +156,6 @@ func (v Value) AsInt64() int64 {
 
 // AsInt64OK functions the same as AsInt64 but returns a boolean instead of panicking. False
 // indicates an error.
-//
-// TODO(skriptble): Add support for Decimal128.
func (v Value) AsInt64OK() (int64, bool) { if !v.IsNumber() { return 0, false @@ -198,21 +190,14 @@ func (v Value) AsInt64OK() (int64, bool) { // AsFloat64 returns a BSON number as an float64. If the BSON type is not a numeric one, this method // will panic. // -// TODO(skriptble): Add support for Decimal128. -func (v Value) AsFloat64() float64 { return 0 } +// TODO(GODRIVER-2751): Implement AsFloat64. +// func (v Value) AsFloat64() float64 // AsFloat64OK functions the same as AsFloat64 but returns a boolean instead of panicking. False // indicates an error. // -// TODO(skriptble): Add support for Decimal128. -func (v Value) AsFloat64OK() (float64, bool) { return 0, false } - -// Add will add this value to another. This is currently only implemented for strings and numbers. -// If either value is a string, the other type is coerced into a string and added to the other. -// -// This method will alter v and will attempt to reuse the []byte of v. If the []byte is too small, -// it will be expanded. -func (v *Value) Add(v2 Value) error { return nil } +// TODO(GODRIVER-2751): Implement AsFloat64OK. +// func (v Value) AsFloat64OK() (float64, bool) // Equal compaes v to v2 and returns true if they are equal. func (v Value) Equal(v2 Value) bool { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go new file mode 100644 index 00000000..92b8cf73 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
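
The client.go body that follows wraps the net/http convenience helpers so that each call runs inside a client span via the instrumented DefaultClient. A minimal, hedged usage sketch; the URL and port are illustrative:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// otelhttp.Get issues the request through the instrumented DefaultClient,
	// so a span is started and ended around the round trip.
	resp, err := otelhttp.Get(context.Background(), "http://localhost:9882/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
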
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
+// Please be careful of initialization order - for example, if you change
+// the global propagator, the DefaultClient might still be using the old one.
+var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
+
+// Get is a convenient replacement for http.Get that adds a span around the request.
+func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+	req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
+	if err != nil {
+		return nil, err
+	}
+	return DefaultClient.Do(req)
+}
+
+// Head is a convenient replacement for http.Head that adds a span around the request.
+func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
+	req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
+	if err != nil {
+		return nil, err
+	}
+	return DefaultClient.Do(req)
+}
+
+// Post is a convenient replacement for http.Post that adds a span around the request.
+func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
+	req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", contentType)
+	return DefaultClient.Do(req)
+}
+
+// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
+func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) {
+	return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
new file mode 100644
index 00000000..303e5505
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
@@ -0,0 +1,46 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"net/http"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Attribute keys that can be added to a span.
+const (
+	ReadBytesKey  = attribute.Key("http.read_bytes")  // if anything was read from the request body, the total number of bytes read
+	ReadErrorKey  = attribute.Key("http.read_error")  // If an error occurred while reading a request, the string of the error (io.EOF is not recorded)
+	WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written
+	WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
+)
+
+// Server HTTP metrics.
+const (
+	RequestCount          = "http.server.request_count"           // Incoming request count total
+	RequestContentLength  = "http.server.request_content_length"  // Incoming request bytes total
+	ResponseContentLength = "http.server.response_content_length" // Outgoing response bytes total
+	ServerLatency         = "http.server.duration"                // Incoming end to end duration, microseconds
+)
+
+// Filter is a predicate used to determine whether a given http.Request should
+// be traced. A Filter must return true if the request should be traced.
+type Filter func(*http.Request) bool
+
+func newTracer(tp trace.TracerProvider) trace.Tracer {
+	return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(Version()))
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
new file mode 100644
index 00000000..e4fa1b8d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
@@ -0,0 +1,208 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"net/http"
+	"net/http/httptrace"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	instrumentationName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+)
+
+// config represents the configuration options available for the http.Handler
+// and http.Transport types.
+type config struct {
+	ServerName        string
+	Tracer            trace.Tracer
+	Meter             metric.Meter
+	Propagators       propagation.TextMapPropagator
+	SpanStartOptions  []trace.SpanStartOption
+	PublicEndpoint    bool
+	PublicEndpointFn  func(*http.Request) bool
+	ReadEvent         bool
+	WriteEvent        bool
+	Filters           []Filter
+	SpanNameFormatter func(string, *http.Request) string
+	ClientTrace       func(context.Context) *httptrace.ClientTrace
+
+	TracerProvider trace.TracerProvider
+	MeterProvider  metric.MeterProvider
+}
+
+// Option interface used for setting optional config properties.
+type Option interface {
+	apply(*config)
+}
+
+type optionFunc func(*config)
+
+func (o optionFunc) apply(c *config) {
+	o(c)
+}
+
+// newConfig creates a new config struct and applies opts to it.
+func newConfig(opts ...Option) *config {
+	c := &config{
+		Propagators:   otel.GetTextMapPropagator(),
+		MeterProvider: otel.GetMeterProvider(),
+	}
+	for _, opt := range opts {
+		opt.apply(c)
+	}
+
+	// Tracer is only initialized if manually specified. Otherwise, it can be passed with the tracing context.
+	if c.TracerProvider != nil {
+		c.Tracer = newTracer(c.TracerProvider)
+	}
+
+	c.Meter = c.MeterProvider.Meter(
+		instrumentationName,
+		metric.WithInstrumentationVersion(Version()),
+	)
+
+	return c
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider trace.TracerProvider) Option {
+	return optionFunc(func(cfg *config) {
+		if provider != nil {
+			cfg.TracerProvider = provider
+		}
+	})
+}
+
+// WithMeterProvider specifies a meter provider to use for creating a meter.
+// If none is specified, the global provider is used.
+func WithMeterProvider(provider metric.MeterProvider) Option {
+	return optionFunc(func(cfg *config) {
+		if provider != nil {
+			cfg.MeterProvider = provider
+		}
+	})
+}
+
+// WithPublicEndpoint configures the Handler to link the span with an incoming
+// span context. If this option is not provided, then the association is a child
+// association instead of a link.
+func WithPublicEndpoint() Option {
+	return optionFunc(func(c *config) {
+		c.PublicEndpoint = true
+	})
+}
+
+// WithPublicEndpointFn runs with every request, and allows conditionally
+// configuring the Handler to link the span with an incoming span context. If
+// this option is not provided or returns false, then the association is a
+// child association instead of a link.
+// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
+func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
+	return optionFunc(func(c *config) {
+		c.PublicEndpointFn = fn
+	})
+}
+
+// WithPropagators configures specific propagators. If this
+// option isn't specified, then the global TextMapPropagator is used.
+func WithPropagators(ps propagation.TextMapPropagator) Option {
+	return optionFunc(func(c *config) {
+		if ps != nil {
+			c.Propagators = ps
+		}
+	})
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanOptions, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) Option {
+	return optionFunc(func(c *config) {
+		c.SpanStartOptions = append(c.SpanStartOptions, opts...)
+	})
+}
+
+// WithFilter adds a filter to the list of filters used by the handler.
+// If any filter indicates to exclude a request then the request will not be
+// traced. All filters must allow a request to be traced for a Span to be created.
+// If no filters are provided then all requests are traced.
+// Filters will be invoked for each processed request; it is advised to make them
+// simple and fast.
+func WithFilter(f Filter) Option {
+	return optionFunc(func(c *config) {
+		c.Filters = append(c.Filters, f)
+	})
+}
+
+type event int
+
+// Different types of events that can be recorded, see WithMessageEvents.
+const (
+	ReadEvents event = iota
+	WriteEvents
+)
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default only summary attributes are added at the
+// end of the request.
+//
+// Valid events are:
+//   - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
+//     using the ReadBytesKey
+//   - WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
+//     using the WriteBytesKey
+func WithMessageEvents(events ...event) Option {
+	return optionFunc(func(c *config) {
+		for _, e := range events {
+			switch e {
+			case ReadEvents:
+				c.ReadEvent = true
+			case WriteEvents:
+				c.WriteEvent = true
+			}
+		}
+	})
+}
+
+// WithSpanNameFormatter takes a function that will be called on every
+// request; the returned string will become the Span Name.
+func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
+	return optionFunc(func(c *config) {
+		c.SpanNameFormatter = f
+	})
+}
+
+// WithClientTrace takes a function that returns a client trace instance that will be
+// applied to the requests sent through the otelhttp Transport.
+func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
+	return optionFunc(func(c *config) {
+		c.ClientTrace = f
+	})
+}
+
+// WithServerName returns an Option that sets the name of the (virtual) server
+// handling requests.
+func WithServerName(server string) Option {
+	return optionFunc(func(c *config) {
+		c.ServerName = server
+	})
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
new file mode 100644
index 00000000..38c7f01c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package otelhttp provides an http.Handler and functions that are intended
+// to be used to add tracing by wrapping existing handlers (with Handler) and
+// routes (with WithRouteTag).
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
new file mode 100644
index 00000000..b2fbe078
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -0,0 +1,275 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/felixge/httpsnoop"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// middleware is an HTTP middleware that wraps the next handler in a span.
+type middleware struct {
+ operation string
+ server string
+
+ tracer trace.Tracer
+ meter metric.Meter
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ readEvent bool
+ writeEvent bool
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ counters map[string]metric.Int64Counter
+ valueRecorders map[string]metric.Float64Histogram
+ publicEndpoint bool
+ publicEndpointFn func(*http.Request) bool
+}
+
+func defaultHandlerFormatter(operation string, _ *http.Request) string {
+ return operation
+}
+
+// NewHandler wraps the passed handler in a span named after the operation and
+// enriches it with metrics.
+func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
+ return NewMiddleware(operation, opts...)(handler)
+}
+
+// NewMiddleware returns a tracing and metrics instrumentation middleware.
+// The handler returned by the middleware wraps a handler
+// in a span named after the operation and enriches it with metrics.
+func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
+ h := middleware{
+ operation: operation,
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)),
+ WithSpanNameFormatter(defaultHandlerFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ h.configure(c)
+ h.createMeasures()
+
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ h.serveHTTP(w, r, next)
+ })
+ }
+}
+
+func (h *middleware) configure(c *config) {
+ h.tracer = c.Tracer
+ h.meter = c.Meter
+ h.propagators = c.Propagators
+ h.spanStartOptions = c.SpanStartOptions
+ h.readEvent = c.ReadEvent
+ h.writeEvent = c.WriteEvent
+ h.filters = c.Filters
+ h.spanNameFormatter = c.SpanNameFormatter
+ h.publicEndpoint = c.PublicEndpoint
+ h.publicEndpointFn = c.PublicEndpointFn
+ h.server = c.ServerName
+}
+
+func handleErr(err error) {
+ if err != nil {
+ otel.Handle(err)
+ }
+}
+
+func (h *middleware) createMeasures() {
+ h.counters = make(map[string]metric.Int64Counter)
+ h.valueRecorders = make(map[string]metric.Float64Histogram)
+
+ requestBytesCounter, err := h.meter.Int64Counter(RequestContentLength)
+ handleErr(err)
+
+ responseBytesCounter, err := h.meter.Int64Counter(ResponseContentLength)
+ handleErr(err)
+
+ serverLatencyMeasure, err := h.meter.Float64Histogram(ServerLatency)
+ handleErr(err)
+
+ h.counters[RequestContentLength] = requestBytesCounter
+ h.counters[ResponseContentLength] = responseBytesCounter
+ h.valueRecorders[ServerLatency] = serverLatencyMeasure
+}
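Editor's note: NewMiddleware returns a plain func(http.Handler) http.Handler, so it slots into any standard middleware chain. A small sketch under assumed names (the route and handler are hypothetical):

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	users := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// The middleware wraps whatever handler it is given, here a ServeMux.
	mw := otelhttp.NewMiddleware("http.server")

	mux := http.NewServeMux()
	mux.Handle("/users", users)
	_ = http.ListenAndServe(":8080", mw(mux))
}
```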
+
+// serveHTTP sets up tracing and calls the given next http.Handler with the span
+// context injected into the request context.
+func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
+ requestStartTime := time.Now()
+ for _, f := range h.filters {
+ if !f(r) {
+ // Simply pass through to the handler if a filter rejects the request
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
+ opts := []trace.SpanStartOption{
+ trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...),
+ }
+ if h.server != "" {
+ hostAttr := semconv.NetHostName(h.server)
+ opts = append(opts, trace.WithAttributes(hostAttr))
+ }
+ opts = append(opts, h.spanStartOptions...)
+ if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
+ opts = append(opts, trace.WithNewRoot())
+ // Linking incoming span context if any for public endpoint.
+ if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() {
+ opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s}))
+ }
+ }
+
+ tracer := h.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
+ defer span.End()
+
+ readRecordFunc := func(int64) {}
+ if h.readEvent {
+ readRecordFunc = func(n int64) {
+ span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n)))
+ }
+ }
+
+ var bw bodyWrapper
+ // Do not wrap a nil or http.NoBody body: swapping it out would change its
+ // identity, which callers (including net/http) rely on when they compare
+ // the body against exactly nil or http.NoBody.
+ if r.Body != nil && r.Body != http.NoBody {
+ bw.ReadCloser = r.Body
+ bw.record = readRecordFunc
+ r.Body = &bw
+ }
+
+ writeRecordFunc := func(int64) {}
+ if h.writeEvent {
+ writeRecordFunc = func(n int64) {
+ span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n)))
+ }
+ }
+
+ rww := &respWriterWrapper{
+ ResponseWriter: w,
+ record: writeRecordFunc,
+ ctx: ctx,
+ props: h.propagators,
+ statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
+ }
+
+ // Wrap w to use our ResponseWriter methods while also exposing
+ // other interfaces that w may implement (http.CloseNotifier,
+ // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom).
+
+ w = httpsnoop.Wrap(w, httpsnoop.Hooks{
+ Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc {
+ return rww.Header
+ },
+ Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+ return rww.Write
+ },
+ WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+ return rww.WriteHeader
+ },
+ })
+
+ labeler := &Labeler{}
+ ctx = injectLabeler(ctx, labeler)
+
+ next.ServeHTTP(w, r.WithContext(ctx))
+
+ setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err)
+
+ // Add metrics
+ attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
+ if rww.statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
+ }
+ o := metric.WithAttributes(attributes...)
+ h.counters[RequestContentLength].Add(ctx, bw.read, o)
+ h.counters[ResponseContentLength].Add(ctx, rww.written, o)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+ h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o)
+}
+
+func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) {
+ attributes := []attribute.KeyValue{}
+
+ // TODO: Consider adding an event after each read and write, possibly as an
+ // option (defaulting to off), so as to not create needlessly verbose spans.
+ if read > 0 {
+ attributes = append(attributes, ReadBytesKey.Int64(read))
+ }
+ if rerr != nil && rerr != io.EOF {
+ attributes = append(attributes, ReadErrorKey.String(rerr.Error()))
+ }
+ if wrote > 0 {
+ attributes = append(attributes, WroteBytesKey.Int64(wrote))
+ }
+ if statusCode > 0 {
+ attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
+ }
+ span.SetStatus(semconvutil.HTTPServerStatus(statusCode))
+
+ if werr != nil && werr != io.EOF {
+ attributes = append(attributes, WriteErrorKey.String(werr.Error()))
+ }
+ span.SetAttributes(attributes...)
+}
+
+// WithRouteTag annotates spans and metrics with the provided route name
+// using the HTTP route attribute.
+func WithRouteTag(route string, h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ attr := semconv.HTTPRouteKey.String(route)
+
+ span := trace.SpanFromContext(r.Context())
+ span.SetAttributes(attr)
+
+ labeler, _ := LabelerFromContext(r.Context())
+ labeler.Add(attr)
+
+ h.ServeHTTP(w, r)
+ })
+}
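Editor's note: a sketch of WithRouteTag in practice; the route template is hypothetical — the point is to tag the low-cardinality pattern rather than the concrete URL:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	user := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	mux := http.NewServeMux()
	// Tag the route template, not the concrete URL, to keep cardinality low.
	mux.Handle("/users/", otelhttp.WithRouteTag("/users/{id}", user))

	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "http.server"))
}
```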
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
new file mode 100644
index 00000000..edf4ce3d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+// Generate semconvutil package:
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go
+//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
new file mode 100644
index 00000000..d3dede9e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
@@ -0,0 +1,552 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/httpconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+// HTTPClientResponse returns trace attributes for an HTTP response received by a
+// client from a server. It will return the following attributes if the related
+// values are defined in resp: "http.status_code",
+// "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+// append(HTTPClientResponse(resp), HTTPClientRequest(resp.Request)...)
+func HTTPClientResponse(resp *http.Response) []attribute.KeyValue {
+ return hc.ClientResponse(resp)
+}
+
+// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func HTTPClientRequest(req *http.Request) []attribute.KeyValue {
+ return hc.ClientRequest(req)
+}
+
+// HTTPClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func HTTPClientStatus(code int) (codes.Code, string) {
+ return hc.ClientStatus(code)
+}
+
+// HTTPServerRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and, if a port is used to route to the
+// server, that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ return hc.ServerRequest(server, req)
+}
+
+// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and, if a port is used to route to the
+// server, that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port".
+func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+ return hc.ServerRequestMetrics(server, req)
+}
+
+// HTTPServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func HTTPServerStatus(code int) (codes.Code, string) {
+ return hc.ServerStatus(code)
+}
+
+// HTTPRequestHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func HTTPRequestHeader(h http.Header) []attribute.KeyValue {
+ return hc.RequestHeader(h)
+}
+
+// HTTPResponseHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func HTTPResponseHeader(h http.Header) []attribute.KeyValue {
+ return hc.ResponseHeader(h)
+}
+
+// httpConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type httpConv struct {
+ NetConv *netConv
+
+ EnduserIDKey attribute.Key
+ HTTPClientIPKey attribute.Key
+ HTTPFlavorKey attribute.Key
+ HTTPMethodKey attribute.Key
+ HTTPRequestContentLengthKey attribute.Key
+ HTTPResponseContentLengthKey attribute.Key
+ HTTPRouteKey attribute.Key
+ HTTPSchemeHTTP attribute.KeyValue
+ HTTPSchemeHTTPS attribute.KeyValue
+ HTTPStatusCodeKey attribute.Key
+ HTTPTargetKey attribute.Key
+ HTTPURLKey attribute.Key
+ HTTPUserAgentKey attribute.Key
+}
+
+var hc = &httpConv{
+ NetConv: nc,
+
+ EnduserIDKey: semconv.EnduserIDKey,
+ HTTPClientIPKey: semconv.HTTPClientIPKey,
+ HTTPFlavorKey: semconv.HTTPFlavorKey,
+ HTTPMethodKey: semconv.HTTPMethodKey,
+ HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
+ HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+ HTTPRouteKey: semconv.HTTPRouteKey,
+ HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
+ HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
+ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
+ HTTPTargetKey: semconv.HTTPTargetKey,
+ HTTPURLKey: semconv.HTTPURLKey,
+ HTTPUserAgentKey: semconv.HTTPUserAgentKey,
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+// append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+ var n int
+ if resp.StatusCode > 0 {
+ n++
+ }
+ if resp.ContentLength > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ if resp.StatusCode > 0 {
+ attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+ }
+ if resp.ContentLength > 0 {
+ attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+ }
+ return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+ n := 4 // URL, peer name, proto, and method.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+ if req.ContentLength > 0 {
+ n++
+ }
+ userID, _, hasUserID := req.BasicAuth()
+ if hasUserID {
+ n++
+ }
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+ attrs = append(attrs, c.flavor(req.Proto))
+
+ var u string
+ if req.URL != nil {
+ // Remove any username/password info that may be in the URL.
+ userinfo := req.URL.User
+ req.URL.User = nil
+ u = req.URL.String()
+ // Restore any username/password info that was removed.
+ req.URL.User = userinfo
+ }
+ attrs = append(attrs, c.HTTPURLKey.String(u))
+
+ attrs = append(attrs, c.NetConv.PeerName(peer))
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+ }
+
+ if l := req.ContentLength; l > 0 {
+ attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+ }
+
+ if hasUserID {
+ attrs = append(attrs, c.EnduserIDKey.String(userID))
+ }
+
+ return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and, if a port is used to route to the
+// server, that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ // TODO: This currently does not add the specification required
+ // `http.target` attribute. It has too high of a cardinality to safely be
+ // added. An alternate should be added, or this comment removed, when it is
+ // addressed by the specification. If it is ultimately decided to continue
+ // not including the attribute, the HTTPTargetKey field of the httpConv
+ // should be removed as well.
+
+ n := 4 // Method, scheme, proto, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ peer, peerPort := splitHostPort(req.RemoteAddr)
+ if peer != "" {
+ n++
+ if peerPort > 0 {
+ n++
+ }
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+ userID, _, hasUserID := req.BasicAuth()
+ if hasUserID {
+ n++
+ }
+ clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+ if clientIP != "" {
+ n++
+ }
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.flavor(req.Proto))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+
+ if peer != "" {
+ // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+ // file-path that would be interpreted with a sock family.
+ attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+ if peerPort > 0 {
+ attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+ }
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+ }
+
+ if hasUserID {
+ attrs = append(attrs, c.EnduserIDKey.String(userID))
+ }
+
+ if clientIP != "" {
+ attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+ }
+
+ return attrs
+}
+
+// ServerRequestMetrics returns metric attributes for an HTTP request received
+// by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and, if a port is used to route to the
+// server, that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port".
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+ // TODO: This currently does not add the specification required
+ // `http.target` attribute. It has too high of a cardinality to safely be
+ // added. An alternate should be added, or this comment removed, when it is
+ // addressed by the specification. If it is ultimately decided to continue
+ // not including the attribute, the HTTPTargetKey field of the httpConv
+ // should be removed as well.
+
+ n := 4 // Method, scheme, proto, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.methodMetric(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.flavor(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + return attrs +} + +func (c *httpConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func (c *httpConv) flavor(proto string) attribute.KeyValue { + switch proto { + case "HTTP/1.0": + return c.HTTPFlavorKey.String("1.0") + case "HTTP/1.1": + return c.HTTPFlavorKey.String("1.1") + case "HTTP/2": + return c.HTTPFlavorKey.String("2.0") + case "HTTP/3": + return c.HTTPFlavorKey.String("3.0") + default: + return c.HTTPFlavorKey.String(proto) + } +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// RequestHeader returns the contents of h as OpenTelemetry attributes. +func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue { + return c.header("http.request.header", h) +} + +// ResponseHeader returns the contents of h as OpenTelemetry attributes. +func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue { + return c.header("http.response.header", h) +} + +func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue { + key := func(k string) attribute.Key { + k = strings.ToLower(k) + k = strings.ReplaceAll(k, "-", "_") + k = fmt.Sprintf("%s.%s", prefix, k) + return attribute.Key(k) + } + + attrs := make([]attribute.KeyValue, 0, len(h)) + for k, v := range h { + attrs = append(attrs, key(k).StringSlice(v)) + } + return attrs +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *httpConv) ClientStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. 
Status codes in the 400-499 range are not
+// returned as errors.
+func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 500 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
new file mode 100644
index 00000000..bde88934
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -0,0 +1,368 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/netconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "net"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+// NetTransport returns a trace attribute describing the transport protocol of the
+// passed network. See net.Dial for information about acceptable network
+// values.
+func NetTransport(network string) attribute.KeyValue {
+ return nc.Transport(network)
+}
+
+// NetClient returns trace attributes for a client network connection to address.
+// See net.Dial for information about acceptable address values, address should
+// be the same as the one used to create conn. If conn is nil, only network
+// peer attributes will be returned that describe address. Otherwise, the
+// socket level information about conn will also be included.
+func NetClient(address string, conn net.Conn) []attribute.KeyValue {
+ return nc.Client(address, conn)
+}
+
+// NetServer returns trace attributes for a network listener listening at address.
+// See net.Listen for information about acceptable address values, address
+// should be the same as the one used to create ln. If ln is nil, only network
+// host attributes will be returned that describe address. Otherwise, the
+// socket level information about ln will also be included.
+func NetServer(address string, ln net.Listener) []attribute.KeyValue {
+ return nc.Server(address, ln)
+}
+
+// netConv are the network semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type netConv struct { + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetSockFamilyKey attribute.Key + NetSockPeerAddrKey attribute.Key + NetSockPeerPortKey attribute.Key + NetSockHostAddrKey attribute.Key + NetSockHostPortKey attribute.Key + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportInProc attribute.KeyValue +} + +var nc = &netConv{ + NetHostNameKey: semconv.NetHostNameKey, + NetHostPortKey: semconv.NetHostPortKey, + NetPeerNameKey: semconv.NetPeerNameKey, + NetPeerPortKey: semconv.NetPeerPortKey, + NetSockFamilyKey: semconv.NetSockFamilyKey, + NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, + NetSockPeerPortKey: semconv.NetSockPeerPortKey, + NetSockHostAddrKey: semconv.NetSockHostAddrKey, + NetSockHostPortKey: semconv.NetSockHostPortKey, + NetTransportOther: semconv.NetTransportOther, + NetTransportTCP: semconv.NetTransportTCP, + NetTransportUDP: semconv.NetTransportUDP, + NetTransportInProc: semconv.NetTransportInProc, +} + +func (c *netConv) Transport(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return c.NetTransportTCP + case "udp", "udp4", "udp6": + return c.NetTransportUDP + case "unix", "unixgram", "unixpacket": + return c.NetTransportInProc + default: + // "ip:*", "ip4:*", and "ip6:*" all are considered other. + return c.NetTransportOther + } +} + +// Host returns attributes for a network host address. +func (c *netConv) Host(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.HostName(h)) + if p > 0 { + attrs = append(attrs, c.HostPort(int(p))) + } + return attrs +} + +// Server returns attributes for a network listener listening at address. See +// net.Listen for information about acceptable address values, address should +// be the same as the one used to create ln. If ln is nil, only network host +// attributes will be returned that describe address. Otherwise, the socket +// level information about ln will also be included. +func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue { + if ln == nil { + return c.Host(address) + } + + lAddr := ln.Addr() + if lAddr == nil { + return c.Host(address) + } + + hostName, hostPort := splitHostPort(address) + sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) + network := lAddr.Network() + sockFamily := family(network, sockHostAddr) + + n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) + n += positiveInt(hostPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if hostName != "" { + attr = append(attr, c.HostName(hostName)) + if hostPort > 0 { + // Only if net.host.name is set should net.host.port be. + attr = append(attr, c.HostPort(hostPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. 
+ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func (c *netConv) HostName(name string) attribute.KeyValue { + return c.NetHostNameKey.String(name) +} + +func (c *netConv) HostPort(port int) attribute.KeyValue { + return c.NetHostPortKey.Int(port) +} + +// Client returns attributes for a client network connection to address. See +// net.Dial for information about acceptable address values, address should be +// the same as the one used to create conn. If conn is nil, only network peer +// attributes will be returned that describe address. Otherwise, the socket +// level information about conn will also be included. +func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue { + if conn == nil { + return c.Peer(address) + } + + lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() + + var network string + switch { + case lAddr != nil: + network = lAddr.Network() + case rAddr != nil: + network = rAddr.Network() + default: + return c.Peer(address) + } + + peerName, peerPort := splitHostPort(address) + var ( + sockFamily string + sockPeerAddr string + sockPeerPort int + sockHostAddr string + sockHostPort int + ) + + if lAddr != nil { + sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) + } + + if rAddr != nil { + sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) + } + + switch { + case sockHostAddr != "": + sockFamily = family(network, sockHostAddr) + case sockPeerAddr != "": + sockFamily = family(network, sockPeerAddr) + } + + n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) + n += positiveInt(peerPort, sockPeerPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if peerName != "" { + attr = append(attr, c.PeerName(peerName)) + if peerPort > 0 { + // Only if net.peer.name is set should net.peer.port be. + attr = append(attr, c.PeerPort(peerPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockPeerAddr != "" { + attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) + if sockPeerPort > 0 { + // Only if net.sock.peer.addr is set should net.sock.peer.port be. + attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) + } + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. + attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func family(network, address string) string { + switch network { + case "unix", "unixgram", "unixpacket": + return "unix" + default: + if ip := net.ParseIP(address); ip != nil { + if ip.To4() == nil { + return "inet6" + } + return "inet" + } + } + return "" +} + +func nonZeroStr(strs ...string) int { + var n int + for _, str := range strs { + if str != "" { + n++ + } + } + return n +} + +func positiveInt(ints ...int) int { + var n int + for _, i := range ints { + if i > 0 { + n++ + } + } + return n +} + +// Peer returns attributes for a network peer address. 
+func (c *netConv) Peer(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.PeerName(h))
+ if p > 0 {
+ attrs = append(attrs, c.PeerPort(int(p)))
+ }
+ return attrs
+}
+
+func (c *netConv) PeerName(name string) attribute.KeyValue {
+ return c.NetPeerNameKey.String(name)
+}
+
+func (c *netConv) PeerPort(port int) attribute.KeyValue {
+ return c.NetPeerPortKey.Int(port)
+}
+
+func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
+ return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
+ return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndex(hostport, "]")
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndex(hostport, ":"); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+ return host, int(p)
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
new file mode 100644
index 00000000..26a51a18
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
+// the metrics recorded by the net/http instrumentation.
+type Labeler struct {
+ mu sync.Mutex
+ attributes []attribute.KeyValue
+}
+
+// Add attributes to a Labeler.
+func (l *Labeler) Add(ls ...attribute.KeyValue) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.attributes = append(l.attributes, ls...)
+}
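Editor's note: a sketch of the Labeler from inside an instrumented handler; the attribute key is hypothetical, and LabelerFromContext is defined just below. The handler must run under NewHandler (or NewMiddleware) for a Labeler to be present in the context:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// serveHTTP injected a Labeler before calling this handler.
		labeler, _ := otelhttp.LabelerFromContext(r.Context())
		labeler.Add(attribute.String("tenant", "acme")) // hypothetical attribute
		w.WriteHeader(http.StatusOK)
	})

	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(h, "http.server"))
}
```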
+
+// Get returns a copy of the attributes added to the Labeler.
+func (l *Labeler) Get() []attribute.KeyValue {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ ret := make([]attribute.KeyValue, len(l.attributes))
+ copy(ret, l.attributes)
+ return ret
+}
+
+type labelerContextKeyType int
+
+const labelerContextKey labelerContextKeyType = 0
+
+func injectLabeler(ctx context.Context, l *Labeler) context.Context {
+ return context.WithValue(ctx, labelerContextKey, l)
+}
+
+// LabelerFromContext retrieves a Labeler instance from the provided context if
+// one is available. If no Labeler was found in the provided context a new, empty
+// Labeler is returned and the second return value is false. In this case it is
+// safe to use the Labeler but any attributes added to it will not be used.
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
+ l, ok := ctx.Value(labelerContextKey).(*Labeler)
+ if !ok {
+ l = &Labeler{}
+ }
+ return l, ok
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
new file mode 100644
index 00000000..e835cac1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -0,0 +1,193 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/http/httptrace"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Transport implements the http.RoundTripper interface and wraps
+// outbound HTTP(S) requests with a span.
+type Transport struct {
+ rt http.RoundTripper
+
+ tracer trace.Tracer
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ clientTrace func(context.Context) *httptrace.ClientTrace
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewTransport wraps the provided http.RoundTripper with one that
+// starts a span and injects the span context into the outbound request headers.
+//
+// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
+// as the base http.RoundTripper.
+func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
+ if base == nil {
+ base = http.DefaultTransport
+ }
+
+ t := Transport{
+ rt: base,
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
+ WithSpanNameFormatter(defaultTransportFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ t.applyConfig(c)
+
+ return &t
+}
+
+func (t *Transport) applyConfig(c *config) {
+ t.tracer = c.Tracer
+ t.propagators = c.Propagators
+ t.spanStartOptions = c.SpanStartOptions
+ t.filters = c.Filters
+ t.spanNameFormatter = c.SpanNameFormatter
+ t.clientTrace = c.ClientTrace
+}
+
+func defaultTransportFormatter(_ string, r *http.Request) string {
+ return "HTTP " + r.Method
+}
+
+// RoundTrip creates a Span and propagates its context via the provided request's headers
+// before handing the request to the configured base RoundTripper. The created span will
+// end when the response body is closed or when a read from the body returns io.EOF.
+func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
+ for _, f := range t.filters {
+ if !f(r) {
+ // Simply pass through to the base RoundTripper if a filter rejects the request
+ return t.rt.RoundTrip(r)
+ }
+ }
+
+ tracer := t.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
+
+ ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
+
+ if t.clientTrace != nil {
+ ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
+ }
+
+ r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
+ span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+ t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
+
+ res, err := t.rt.RoundTrip(r)
+ if err != nil {
+ span.RecordError(err)
+ span.SetStatus(codes.Error, err.Error())
+ span.End()
+ return res, err
+ }
+
+ span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
+ span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+ res.Body = newWrappedBody(span, res.Body)
+
+ return res, err
+}
+
+// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
+// io.ReadCloser. If the passed body implements io.Writer, the returned value
+// will implement io.ReadWriteCloser.
+func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser {
+ // Successful protocol switch responses will have a body that
+ // implements io.ReadWriteCloser. Ensure this interface type continues
+ // to be satisfied if that is the case.
+ if _, ok := body.(io.ReadWriteCloser); ok {
+ return &wrappedBody{span: span, body: body}
+ }
+
+ // Remove the implementation of the io.ReadWriteCloser and only implement
+ // the io.ReadCloser.
+ return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}}
+}
+
+// wrappedBody is the response body type returned by the transport
+// instrumentation to complete a span. Errors encountered when using the
+// response body are recorded in the span tracking the response.
+//
+// The span tracking the response is ended when this body is closed.
+//
+// If the response body implements the io.Writer interface (i.e. for
+// successful protocol switches), the wrapped body also will.
+type wrappedBody struct {
+ span trace.Span
+ body io.ReadCloser
+}
+
+var _ io.ReadWriteCloser = &wrappedBody{}
+
+func (wb *wrappedBody) Write(p []byte) (int, error) {
+ // This will not panic given the guard in newWrappedBody.
+ n, err := wb.body.(io.Writer).Write(p)
+ if err != nil {
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+func (wb *wrappedBody) Read(b []byte) (int, error) {
+ n, err := wb.body.Read(b)
+
+ switch err {
+ case nil:
+ // nothing to do here but fall through to the return
+ case io.EOF:
+ wb.span.End()
+ default:
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+func (wb *wrappedBody) Close() error {
+ wb.span.End()
+ if wb.body != nil {
+ return wb.body.Close()
+ }
+ return nil
+}
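Editor's note: client-side usage of the Transport above, a minimal sketch with a placeholder URL. Because the client span ends only when the response body is closed or read through io.EOF (see wrappedBody), the body must be drained and closed:

```go
package main

import (
	"io"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	client := http.Client{
		// A nil base falls back to http.DefaultTransport.
		Transport: otelhttp.NewTransport(nil),
	}

	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		return
	}
	defer resp.Body.Close() // closing the body ends the client span
	_, _ = io.Copy(io.Discard, resp.Body)
}
```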
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
new file mode 100644
index 00000000..6eace875
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// Version is the current release version of the otelhttp instrumentation.
+func Version() string {
+ return "0.45.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
new file mode 100644
index 00000000..11a35ed1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
@@ -0,0 +1,99 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "go.opentelemetry.io/otel/propagation"
+)
+
+var _ io.ReadCloser = &bodyWrapper{}
+
+// bodyWrapper wraps an http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type bodyWrapper struct {
+ io.ReadCloser
+ record func(n int64) // must not be nil
+
+ read int64
+ err error
+}
+
+func (w *bodyWrapper) Read(b []byte) (int, error) {
+ n, err := w.ReadCloser.Read(b)
+ n1 := int64(n)
+ w.read += n1
+ w.err = err
+ w.record(n1)
+ return n, err
+}
+
+func (w *bodyWrapper) Close() error {
+ return w.ReadCloser.Close()
+}
+
+var _ http.ResponseWriter = &respWriterWrapper{}
+
+// respWriterWrapper wraps an http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
+// that may be useful when using it in real life situations.
+type respWriterWrapper struct {
+ http.ResponseWriter
+ record func(n int64) // must not be nil
+
+ // used to inject the header
+ ctx context.Context
+
+ props propagation.TextMapPropagator
+
+ written int64
+ statusCode int
+ err error
+ wroteHeader bool
+}
+
+func (w *respWriterWrapper) Header() http.Header {
+ return w.ResponseWriter.Header()
+}
+
+func (w *respWriterWrapper) Write(p []byte) (int, error) {
+ if !w.wroteHeader {
+ w.WriteHeader(http.StatusOK)
+ }
+ n, err := w.ResponseWriter.Write(p)
+ n1 := int64(n)
+ w.record(n1)
+ w.written += n1
+ w.err = err
+ return n, err
+}
+
+// WriteHeader persists the initial statusCode for span attribution.
+// All calls to WriteHeader are propagated to the underlying ResponseWriter,
+// but only the statusCode from the first call is recorded.
+// Blocking subsequent calls would alter the expected behavior and would suppress
+// the net/http warnings that help developers notice incorrect handler implementations.
+func (w *respWriterWrapper) WriteHeader(statusCode int) {
+ if !w.wroteHeader {
+ w.wroteHeader = true
+ w.statusCode = statusCode
+ }
+ w.ResponseWriter.WriteHeader(statusCode)
+}
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
new file mode 100644
index 00000000..ae6a3bcf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -0,0 +1,5 @@
+ot
+fo
+te
+collison
+consequentially
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
new file mode 100644
index 00000000..4afbb1fb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellrc
@@ -0,0 +1,10 @@
+# https://github.com/codespell-project/codespell
+[codespell]
+builtin = clear,rare,informal
+check-filenames =
+check-hidden =
+ignore-words = .codespellignore
+interactive = 1
+skip = .git,go.mod,go.sum,semconv,venv,.tools
+uri-ignore-words-list = *
+write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
new file mode 100644
index 00000000..314766e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitattributes
@@ -0,0 +1,3 @@
+* text=auto eol=lf
+*.{cmd,[cC][mM][dD]} text eol=crlf
+*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
new file mode 100644
index 00000000..f3355c85
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -0,0 +1,25 @@
+.DS_Store
+Thumbs.db
+
+.tools/
+venv/
+.idea/
+.vscode/
+*.iml
+*.so
+coverage.*
+go.work
+go.work.sum
+
+gen/
+
+/example/dice/dice
+/example/fib/fib
+/example/fib/traces.txt
+/example/jaeger/jaeger
+/example/namedtracer/namedtracer
+/example/opencensus/opencensus
+/example/passthrough/passthrough
+/example/prometheus/prometheus
+/example/zipkin/zipkin
+/example/otel-collector/otel-collector
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
new file mode 100644
index 00000000..38a1f569
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "opentelemetry-proto"]
+ path = exporters/otlp/internal/opentelemetry-proto
+ url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
new file mode 100644
index 00000000..6e8eeec0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -0,0 +1,281 @@
+# See https://github.com/golangci/golangci-lint#config-file
+run:
+ issues-exit-code: 1 #Default
+ tests: true #Default
+
+linters:
+ # Disable everything by default so upgrades do not include new "default
+ # enabled" linters.
+ disable-all: true
+ # Specifically enable linters we want to use.
+ enable:
+ - depguard
+ - errcheck
+ - godot
+ - gofmt
+ - goimports
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - revive
+ - staticcheck
+ - typecheck
+ - unused
+
+issues:
+ # Maximum issues count per one linter.
+ # Set to 0 to disable.
+ # Default: 50
+ # Setting to unlimited so the linter only is run once to debug all issues.
+ max-issues-per-linter: 0
+ # Maximum count of issues with the same text.
+ # Set to 0 to disable.
+ # Default: 3
+ # Setting to unlimited so the linter only is run once to debug all issues.
+ max-same-issues: 0
+ # Excluding configuration per-path, per-linter, per-text and per-source.
+ exclude-rules:
+ # TODO: Having appropriate comments for exported objects helps development,
+ # even for objects in internal packages. Appropriate comments for all
+ # exported objects should be added and this exclusion removed.
+ - path: '.*internal/.*'
+ text: "exported (method|function|type|const) (.+) should have comment or be unexported"
+ linters:
+ - revive
+ # Yes, they are, but it's okay in a test.
+ - path: _test\.go
+ text: "exported func.*returns unexported type.*which can be annoying to use"
+ linters:
+ - revive
+ # Example test functions should be treated like main.
+ - path: example.*_test\.go
+ text: "calls to (.+) only in main[(][)] or init[(][)] functions"
+ linters:
+ - revive
+ include:
+ # revive exported should have comment or be unexported.
+ - EXC0012
+ # revive package comment should be of the form ...
+ - EXC0013
+
+linters-settings:
+ depguard:
+ rules:
+ non-tests:
+ files:
+ - "!$test"
+ - "!**/*test/*.go"
+ - "!**/internal/matchers/*.go"
+ deny:
+ - pkg: "testing"
+ - pkg: "github.com/stretchr/testify"
+ - pkg: "crypto/md5"
+ - pkg: "crypto/sha1"
+ - pkg: "crypto/**/pkix"
+ otlp-internal:
+ files:
+ - "!**/exporters/otlp/internal/**/*.go"
+ deny:
+ - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
+ desc: Do not use cross-module internal packages.
+ otlptrace-internal:
+ files:
+ - "!**/exporters/otlp/otlptrace/*.go"
+ - "!**/exporters/otlp/otlptrace/internal/**.go"
+ deny:
+ - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
+ desc: Do not use cross-module internal packages.
+ otlpmetric-internal:
+ files:
+ - "!**/exporters/otlp/otlpmetric/internal/*.go"
+ - "!**/exporters/otlp/otlpmetric/internal/**/*.go"
+ deny:
+ - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
+ desc: Do not use cross-module internal packages.
+ otel-internal: + files: + - "**/sdk/*.go" + - "**/sdk/**/*.go" + - "**/exporters/*.go" + - "**/exporters/**/*.go" + - "**/schema/*.go" + - "**/schema/**/*.go" + - "**/metric/*.go" + - "**/metric/**/*.go" + - "**/bridge/*.go" + - "**/bridge/**/*.go" + - "**/example/*.go" + - "**/example/**/*.go" + - "**/trace/*.go" + - "**/trace/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/internal$" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/attribute" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/internaltest" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/matchers" + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 00000000..40d62fa2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 00000000..3202496c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 00000000..3e5c35b5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,2737 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.19.0/0.42.0/0.0.7] 2023-09-28 + +This release contains the first stable release of the OpenTelemetry Go [metric SDK]. +Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package. 
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
+- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+### Removed
+
+- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
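The two stdout exporter options called out in the 1.19.0 notes compose as follows; a minimal sketch, assuming only the `WithWriter`/`WithPrettyPrint` options described above (the reader choice is illustrative):

```go
package main

import (
	"context"
	"os"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Emit human-readable JSON to stdout; compact output is the default.
	exp, err := stdoutmetric.New(
		stdoutmetric.WithWriter(os.Stdout),
		stdoutmetric.WithPrettyPrint(),
	)
	if err != nil {
		panic(err)
	}

	// Drive the exporter from a periodic reader on an SDK MeterProvider.
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp)),
	)
	defer func() { _ = mp.Shutdown(context.Background()) }()
}
```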
+## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
+
+This is a release candidate for the v1.19.0/v0.42.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+## [1.18.0/0.41.0/0.0.6] 2023-09-12
+
+This release drops the compatibility guarantee of [Go 1.19].
+
+### Added
+
+- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader. (#4473)
+- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
+
+### Changed
+
+- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
+ The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
+
+### Removed
+
+- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
+- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
+- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Add support for exponential histogram aggregations.
+ A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
+- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
+ The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
+- Accept 201 to 299 HTTP status codes as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrency safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
+- Support Go 1.21. (#4463)
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package.
(#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments they created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user-provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/sdk/metric`.
+ Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric`.
+ Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl.
(#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+ OpenTelemetry dropped support for the Jaeger exporter in July 2023.
+ Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
+ or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
+- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
+ Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
+
+## [1.16.0/0.39.0] 2023-05-18
+
+This release contains the first stable release of the OpenTelemetry Go [metric API].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
+ The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
+- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
+ The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
+- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
+- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222)
+- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages.
(#4271)
+
+### Changed
+
+- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049)
+- `MeterProvider` returns noop meters once it has been shut down. (#4154)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
+ Use `go.opentelemetry.io/otel/metric` instead. (#4055)
+
+### Fixed
+
+- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
+
+## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
+
+This is a release candidate for the v1.16.0/v0.39.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
+ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+ - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+ - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
+ This stages the metric API to be released as a stable module. (#4038)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/metric/global` package is removed.
+ Use `go.opentelemetry.io/otel` instead. (#4039)
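The global `MeterProvider` workflow described in this release candidate looks roughly like the following; a minimal sketch, with illustrative meter and instrument names:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Install an SDK MeterProvider as the global provider ...
	mp := sdkmetric.NewMeterProvider()
	otel.SetMeterProvider(mp)
	defer func() { _ = mp.Shutdown(context.Background()) }()

	// ... then obtain meters and instruments from it anywhere in the program.
	meter := otel.Meter("example/global-meter")
	counter, err := meter.Int64Counter("example.requests")
	if err != nil {
		panic(err)
	}
	counter.Add(context.Background(), 1)
}
```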
+## [1.15.1/0.38.1] 2023-05-02
+
+### Fixed
+
+- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
+
+## [1.15.0/0.38.0] 2023-04-27
+
+### Added
+
+- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
+- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
+- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
+- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
+ - The `AddConfig` used to hold configuration for addition measurements
+ - `NewAddConfig` used to create a new `AddConfig`
+ - `AddOption` used to configure an `AddConfig`
+ - The `RecordConfig` used to hold configuration for recorded measurements
+ - `NewRecordConfig` used to create a new `RecordConfig`
+ - `RecordOption` used to configure a `RecordConfig`
+ - The `ObserveConfig` used to hold configuration for observed measurements
+ - `NewObserveConfig` used to create a new `ObserveConfig`
+ - `ObserveOption` used to configure an `ObserveConfig`
+- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
+ They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
+
+### Changed
+
+- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
+- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
+ This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
+- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
+ - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
+- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
+- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic gRPC errors. (#3974)
+- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+ - The `Int64Counter.Add` method now accepts `...AddOption`
+ - The `Float64Counter.Add` method now accepts `...AddOption`
+ - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
+ - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
+ - The `Int64Histogram.Record` method now accepts `...RecordOption`
+ - The `Float64Histogram.Record` method now accepts `...RecordOption`
+ - The `Int64Observer.Observe` method now accepts `...ObserveOption`
+ - The `Float64Observer.Observe` method now accepts `...ObserveOption`
+- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+ - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
+ - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
+- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
+
+### Fixed
+
+- `TracerProvider` allows calling `Tracer()` while it's shutting down.
+ It used to deadlock. (#3924)
+- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
+- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
+- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
+ Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
+
+## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
+- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
+- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+ Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
+- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
+- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
+- Added a log message warning about usage of `SimpleSpanProcessor` in production environments.
(#3854)
+
+### Changed
+
+- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
+- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
+- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833)
+- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
+- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
+- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
+- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
+- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900)
+
+### Fixed
+
+- `TracerProvider` consistently disallows registering a `SpanProcessor` after shutdown. (#3845)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
+- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
+- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+ Use the added `float64` instrument configuration instead. (#3895)
+- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+ Use the added `int64` instrument configuration instead. (#3895)
+- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893)
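The `WithHostID` resource option added in this release candidate plugs into the usual resource-detection call; a minimal sketch, printing the detected resource for inspection:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Detect host.id in addition to the default resource attributes.
	res, err := resource.New(
		context.Background(),
		resource.WithHostID(),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(res)
}
```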
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+This release drops the compatibility guarantee of [Go 1.18].
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
+ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+ - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+ - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Dropped compatibility testing for [Go 1.18].
+ The project no longer guarantees support for this version of Go. (#3813)
+
+### Fixed
+
+- Handle empty environment variables as if they were not set. (#3764)
+- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
+- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
+ Use `go.opentelemetry.io/otel` instead. (#3818)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+ The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+ - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+ - `OtelScopeNameKey` -> `OTelScopeNameKey`
+ - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+ - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+ - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+ - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+ - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+ - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+ - `OtelStatusCodeError` -> `OTelStatusCodeError`
+ - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+ - `OtelScopeName` -> `OTelScopeName`
+ - `OtelScopeVersion` -> `OTelScopeVersion`
+ - `OtelLibraryName` -> `OTelLibraryName`
+ - `OtelLibraryVersion` -> `OTelLibraryVersion`
+ - `OtelStatusDescription` -> `OTelStatusDescription`
+- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+ See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
+ - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+ - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout within which an export is attempted.
+
+### Changed
+
+- Fall back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+ This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+ Use the equivalent unit string instead. (#3776)
+ - Use `"1"` instead of `unit.Dimensionless`
+ - Use `"By"` instead of `unit.Bytes`
+ - Use `"ms"` instead of `unit.Milliseconds`
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+ These functions ensure semantic convention type correctness. (#3675)
+
+### Fixed
+
+- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687)
+ - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
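The typed attribute creation functions from the 1.13.0 notes replace hand-built `attribute.KeyValue`s; a small sketch using the `v1.17.0` semconv package (the attribute values are illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

func main() {
	// Each helper returns an attribute.KeyValue with the spec-defined
	// key and value type, so mismatched keys/types cannot compile.
	attrs := []attribute.KeyValue{
		semconv.HTTPMethod("GET"),
		semconv.HTTPStatusCode(200),
		semconv.NetPeerName("example.com"),
	}
	fmt.Println(attrs)
}
```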
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+ This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+ This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+ This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+ The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+ - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+ - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+ The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+ The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+ The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+ These instruments are used as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+ - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+ - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+ - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+ - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+ - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+ - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+ - `Float64Counter` replaces the `syncfloat64.Counter`
+ - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+ - `Float64Histogram` replaces the `syncfloat64.Histogram`
+ - `Int64Counter` replaces the `syncint64.Counter`
+ - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+ - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+ This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+ This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+ The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
+
+### Changed
+
+- Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+ - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+ - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+ - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+ - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+ This `Registration` can be used to unregister callbacks. (#3522)
+- Global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- Global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when first called. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+ This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+ Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+ - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+ - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+ - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+ - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+ - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+ - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
+ - The named `Callback` replaces the inline function parameter. (#3564)
+ - `Callback` is required to return an error. (#3576)
+ - `Callback` accepts the added `Observer` parameter.
+ This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+ - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+ This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+ Instead it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+ Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+ Use `NewMetricProducer` instead. (#3541) +- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. + Use `NewTracerProvider` instead. (#3116) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64Counter` + - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Int64Histogram` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64Counter` + - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Float64Histogram` + +## [1.11.2/0.34.0] 2022-12-05 + +### Added + +- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. + This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) +- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. 
+ This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+ - `OTEL_EXPORTER_OTLP_INSECURE`
+ - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+ - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+ - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Instrument` and `InstrumentKind` types are added to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+ Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+ The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters.
(#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevent panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+ Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+ By default, it will register with the default Prometheus registerer.
+ A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+ It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+ This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+ Instead the exporter is defined as an "unchecked" collector for Prometheus.
+ This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+ This can be disabled using the `WithoutUnits()` option added to that package.
+
+## [1.11.0/0.32.3] - 2022-10-12
+
+### Added
+
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` package when `ForceFlush` or `Shutdown` is called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use the default view if an instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+  Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the SDK. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+  Please see the package documentation for how the new SDK is initialized and configured. (#3175)
+- Update the minimum supported Go version to go1.18. This removes support for go1.17. (#3179)
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
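+
+As a rough sketch of how the refactored SDK referenced above is initialized (the scope name is hypothetical; a `PeriodicReader` paired with an exporter would push on an interval instead of collecting on demand):
+
+```go
+package main
+
+import (
+	"context"
+
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	reader := sdkmetric.NewManualReader()
+	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
+	defer func() { _ = provider.Shutdown(context.Background()) }()
+
+	meter := provider.Meter("example.com/service")
+	_ = meter // create instruments from the meter as needed
+}
+```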
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+  Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
+- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0. (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to the HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
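+
+For illustration, a hedged sketch of applying conventions from one of the versioned `semconv` packages above to a span (`span` is assumed to be a started `trace.Span`):
+
+```go
+package example
+
+import (
+	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func annotate(span trace.Span) {
+	// Attribute keys come from the pinned semantic-conventions version.
+	span.SetAttributes(
+		semconv.HTTPMethodKey.String("GET"),
+		semconv.HTTPStatusCodeKey.Int(200),
+	)
+}
+```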
+
+### Fixed
+
+- Invalid warning for context setup being deferred in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for the `opentracing.TextMap` format in the `Inject` and `Extract` methods of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package, rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from the `nonrecording` to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+  Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+  Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+  The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+  The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+  The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove the import of the `testing` package in non-test builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+  The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+  Consequently, the `Record` type from the same package has also had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
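+
+A small sketch of the renamed type from the 1.8.0 entries above (the field values are hypothetical):
+
+```go
+package example
+
+import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+// Scope replaces the deprecated Library; Library remains as an alias.
+var scope = instrumentation.Scope{
+	Name:      "example.com/mylib",
+	Version:   "v0.1.0",
+	SchemaURL: "https://opentelemetry.io/schemas/1.11.0",
+}
+```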
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+  This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+  Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+  This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+  New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+  High-level changes include:
+
+  - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+    These `InstrumentProvider`s are managed with a `Meter`.
+  - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+  - Asynchronous callbacks can now be registered with a `Meter`.
+
+  Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
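+
+A hedged sketch of the global metric access added above (the scope name is hypothetical; a configured SDK provider would be registered with `global.SetMeterProvider`):
+
+```go
+package example
+
+import "go.opentelemetry.io/otel/metric/global"
+
+func useGlobalMeter() {
+	// Returns a no-op implementation until a real MeterProvider is
+	// registered via global.SetMeterProvider.
+	meter := global.MeterProvider().Meter("example.com/mylib")
+	_ = meter
+}
+```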
+
+### Fixed
+
+- Fall back to general attribute limits when span-specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the Exporters configuration in the TracerProviders message. (#2578)
+- Added support to configure the span limits with environment variables.
+  The following environment variables are supported. (#2606, #2637)
+  - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+  - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+  - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_LINK_COUNT_LIMIT`
+  - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+  If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add span attribute value length limit.
+  The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+  The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+  This option replaces the `WithSpanLimits` option.
+  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+  Setting a limit to zero will effectively disable the related resource it limits, and setting it to a negative value means that resource is unlimited.
+  Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
+
+### Changed
+
+- Drop the oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect that the count is cumulative. (#2640)
+- Introduce a new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` clients. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+  Use `WithRawSpanLimits` instead.
+  That option allows setting unlimited and zero limits; this option does not.
+  This option will be kept until the next major version incremented release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix a race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use the `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s and `Tracer`s for debugging.
+  To enable, use a logger with verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span processor with environment variables.
+  The following environment variables are used (see the sketch following this list). (#2515)
+  - `OTEL_BSP_SCHEDULE_DELAY`
+  - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
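+
+A hedged sketch mapping those variables to the corresponding `sdk/trace` batch span processor options (the exporter is any `SpanExporter`; the values shown are the conventional defaults):
+
+```go
+package example
+
+import (
+	"time"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func newBatchProcessor(exporter sdktrace.SpanExporter) sdktrace.SpanProcessor {
+	return sdktrace.NewBatchSpanProcessor(exporter,
+		sdktrace.WithBatchTimeout(5*time.Second),   // OTEL_BSP_SCHEDULE_DELAY
+		sdktrace.WithExportTimeout(30*time.Second), // OTEL_BSP_EXPORT_TIMEOUT
+		sdktrace.WithMaxQueueSize(2048),            // OTEL_BSP_MAX_QUEUE_SIZE
+		sdktrace.WithMaxExportBatchSize(512),       // OTEL_BSP_MAX_EXPORT_BATCH_SIZE
+	)
+}
+```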
+
+### Changed
+
+- The Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in the Prometheus example, fixing an issue where the example only had a `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember`, and not again when adding them to the baggage itself. (#2522)
+- The order in which attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
+  Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+  This drop order still only applies to attributes with unique keys not already contained in the span.
+  If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+  This can be used by the SDK and API to provide users with feedback about the internal state.
+  To enable verbose logs, configure the logger to print V(1) logs. For debugging information, configure it to print V(5) logs. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handles errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handles errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporters/otlp/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
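+
+A hedged sketch of the lazy `resource.Default` evaluation described in the last entry above (the attribute values are hypothetical):
+
+```go
+package example
+
+import (
+	"os"
+
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+func defaultResource() *resource.Resource {
+	// Because resource.Default is now evaluated on first call rather than
+	// at import time, setting the environment variable first is effective.
+	os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "service.name=checkout")
+	return resource.Default()
+}
+```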
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+  Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<SIGNAL>_ENDPOINT` environment variable is now used without modification of the path.
+  When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- The basic metric controller is updated to use `sync.Map` to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporters/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- Metric SDK `export.ExportKind` and `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively, to keep in line with the current specification and protocol, along with the built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`.
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The no-op implementations of sync and async instruments are no longer exported; new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
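+
+A hedged sketch of the flush behavior described in the last entry above, assuming `tp` was configured with a batch span processor:
+
+```go
+package example
+
+import (
+	"context"
+	"log"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func flushAll(ctx context.Context, tp *sdktrace.TracerProvider) {
+	// ForceFlush drains every registered span processor, so all spans
+	// queued in the BatchSpanProcessor are exported before it returns.
+	if err := tp.ForceFlush(ctx); err != nil {
+		log.Printf("force flush failed: %v", err)
+	}
+}
+```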
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+  The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+  The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+  The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+  - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+    - `K8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- The OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has the `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
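+
+For illustration, a hedged sketch of the value-comparable slice attributes referenced above, built with the typed `*Slice` constructors from `go.opentelemetry.io/otel/attribute` (the keys are hypothetical):
+
+```go
+package example
+
+import "go.opentelemetry.io/otel/attribute"
+
+func sliceAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		attribute.StringSlice("acl.users", []string{"alice", "bob"}),
+		attribute.IntSlice("ports", []int{80, 443}),
+		attribute.BoolSlice("feature.flags", []bool{true, false}),
+	}
+}
+```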
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from the `go.opentelemetry.io/otel/attribute` package.
+  Use the typed functions and methods added to the package instead. (#2235)
+  - The `Key.Array` method is removed.
+  - The `Array` function is removed.
+  - The `Any` function is removed.
+  - The `ArrayValue` function is removed.
+  - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added the `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
+  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+  Included is an example application that computes Fibonacci numbers. (#2203)
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+  - ValueRecorder becomes Histogram
+  - ValueObserver becomes Gauge
+  - SumObserver becomes CounterObserver
+  - UpDownSumObserver becomes UpDownCounterObserver
+  The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into the `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, and pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+  All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+  The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type are deprecated.
+  Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+  Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
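+
+A hedged sketch of the replacement path named in the deprecation above, recording ended spans in tests with `tracetest.SpanRecorder` (the tracer and span names are hypothetical):
+
+```go
+package example
+
+import (
+	"context"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/sdk/trace/tracetest"
+)
+
+func recordedSpans() []sdktrace.ReadOnlySpan {
+	sr := tracetest.NewSpanRecorder()
+	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
+
+	_, span := tp.Tracer("test").Start(context.Background(), "operation")
+	span.End()
+
+	return sr.Ended() // ended spans, ready for assertions
+}
+```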
+
+### Removed
+
+- Removed the metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when the `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in `resources.go`. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added the `WithOSDescription` resource configuration option to set the OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added the `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added the API `LinkFromContext` to return a Link which encapsulates the SpanContext from the provided context and also encapsulates attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+  This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+  For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  Use the `trace.ParseTraceState` function instead. (#2122)
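+
+A hedged sketch of the suggested `trace.ParseTraceState` replacement (the header value is hypothetical):
+
+```go
+package example
+
+import (
+	"fmt"
+	"log"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func parseTraceState() {
+	ts, err := trace.ParseTraceState("vendor1=opaque1,vendor2=opaque2")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(ts.Len())          // 2
+	fmt.Println(ts.Get("vendor1")) // "opaque1"
+}
+```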
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+  The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed the `DroppedAttributeCount` field from the `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- The OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+  This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as the default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for the OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`.
+Modules at major version 1 or greater will not depend on modules with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+    | gRPC Status Code | Description        |
+    | ---------------- | ------------------ |
+    | 1                | Cancelled          |
+    | 4                | Deadline Exceeded  |
+    | 8                | Resource Exhausted |
+    | 10               | Aborted            |
+    | 11               | Out of Range       |
+    | 14               | Unavailable        |
+    | 15               | Data Loss          |
+- Added the `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added the `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds the `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+  It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added the `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+  This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
+- Added the `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+  These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
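+
+A hedged sketch of the new baggage types and context helpers listed above (the member key and value are hypothetical):
+
+```go
+package example
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+func withBaggage(ctx context.Context) context.Context {
+	m, err := baggage.NewMember("user_id", "42")
+	if err != nil {
+		log.Fatal(err)
+	}
+	b, err := baggage.New(m)
+	if err != nil {
+		log.Fatal(err)
+	}
+	ctx = baggage.ContextWithBaggage(ctx, b)
+	// Read it back anywhere downstream.
+	_ = baggage.FromContext(ctx).Member("user_id").Value() // "42"
+	return ctx
+}
+```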
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+  `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using the `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against the last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when calling the `ForceFlush()` method. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+  This method returns the status of a span using the new `Status` type. (#1874)
+- Updated the `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+  This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+  This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+  The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated the `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated the `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated the `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated the `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported the types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options individually instead. (#1810)
+- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` in `go.opentelemetry.io/otel/sdk/trace`.
+  The `Tracer` method is not required to be included in this interface, and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+  The `IsRecording` method returns if the span is recording or not.
+  A read-only span value does not need to know if updates to it will be recorded or not.
+  By definition, it cannot be updated, so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+  The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+  When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+  Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
+  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
+  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed the `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+  Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+  These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as the default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if a channel is closed, fixing a Jaeger exporter test panic from closing a closed channel. (#1870, #1973)
+- Avoid transport security when the OTLP endpoint is a Unix socket. (#2001)
+
+### Security
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`.
+  These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to the batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte`, and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds a test to check that BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added the `WithOSType` resource configuration option to set the OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+  - `process.pid`
+  - `process.executable.name`
+  - `process.executable.path`
+  - `process.command_args`
+  - `process.owner`
+  - `process.runtime.name`
+  - `process.runtime.version`
+  - `process.runtime.description`
+- Adds the `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769, #1811)
+  - `OTEL_EXPORTER_OTLP_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_HEADERS`
+  - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+  - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+  - `OTEL_EXPORTER_OTLP_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds the `otlpgrpc.WithTimeout` option for configuring the timeout of the otlp/gRPC exporter. (#1821)
+- Adds the `jaeger.WithMaxPacketSize` option for configuring the maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+  This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
+  Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed a typo for the default service name in the Jaeger Exporter. (#1797)
+- Fix flaky OTLP reconnection of the client connection. (#1527, #1814)
+- Fix the Jaeger exporter dropping span batches that exceed the UDP packet size limit.
+  Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification (see the sketch following this list). (#1492)
+- The Jaeger exporter was updated to use thrift v0.14.1. (#1712)
+- Migrate from using an internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+  The Span's SpanContext can now self-identify as being remote or not.
+  This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+  The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+  This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+  to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD`, in compliance with the OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+  It is no longer a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in the Jaeger Exporter honor the context deadline. (#1773)
+- Modify the Zipkin Exporter's default service name to use the default resource's serviceName instead of an empty string. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The prometheus.InstallNewPipeline example is moved from a comment to an example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+  This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+  The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself; instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
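+
+A hedged sketch of the `RecordError` convention from the first entry in the list above (`span` is a started `trace.Span` and `err` is non-nil):
+
+```go
+package example
+
+import (
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func recordFailure(span trace.Span, err error) {
+	// RecordError adds an "exception" event; it does not set the span
+	// status, so set it explicitly when the error should mark failure.
+	span.RecordError(err)
+	span.SetStatus(codes.Error, err.Error())
+}
+```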
+
+### Removed
+
+- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`.
+  These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+  This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+  To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
+- Setting the error status while recording an error with Span from the oteltest package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+  Because of this, `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span's `SpanContext`.
+  If needed, that `SpanContext`'s `IsRemote()` method can then be used to determine if it is remote or not. (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+  This field is redundant with the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove the `Process` configuration, the `WithProcessFromEnv` and `ProcessFromEnv` options, and the type from the Jaeger exporter package.
+  The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+  To disable the exporter, unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+  These functions for retrieving specific environment variable values are redundant with other internal functions and
+  are not intended for end-user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+  This option was used to set SDK options for the exporter creation convenience functions.
+  These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases.
+  If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+  The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+  The type is no longer used by the exporter to configure anything.
+  All the previous configurations these options provided were duplicates of SDK configuration.
+  They have been removed in favor of using the SDK configuration, focusing the exporter configuration on only the endpoints it will send telemetry to. (#1830)
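+
+A hedged sketch of the unified accessor described above:
+
+```go
+package example
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func hasRemoteParent(ctx context.Context) bool {
+	// SpanContextFromContext returns the current Span's SpanContext;
+	// IsRemote reports whether it was extracted from remote data.
+	sc := trace.SpanContextFromContext(ctx)
+	return sc.IsValid() && sc.IsRemote()
+}
+```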
- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770) +- Remove the `Process` configuration and type, along with `WithProcessFromEnv` and `ProcessFromEnv`, from the Jaeger exporter package. + The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804) +- Remove the `WithDisabled` option from the Jaeger exporter. + To disable the exporter, unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806) +- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. + These functions for retrieving specific environment variable values are redundant with other internal functions and + are not intended for end-user use. (#1824) +- Removed the Jaeger exporter `WithSDKOptions` `Option`. + This option was used to set SDK options for the exporter creation convenience functions. + These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases. + If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825) +- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. + The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter `Option` type is removed. + The type is no longer used by the exporter to configure anything. + All the previous configurations these options provided were duplicates of SDK configuration. + They have been removed in favor of the SDK configuration, focusing the exporter configuration on only the endpoints it will send telemetry to. (#1830) + +## [0.19.0] - 2021-03-18 + +### Added + +- Added a `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobuf. (#1586) +- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608) +- Added `WithSampler` and `WithSpanLimits` to the tracer provider. (#1633, #1702) +- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and an `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701) +- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703) + +### Changed + +- `trace.SpanContext` is now immutable and has no exported fields. (#1573) + - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known (see the sketch below). +- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608) +- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608) +- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612) +- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656) +- Added a non-empty string check for trace `Attribute` keys. (#1659) +- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
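A short sketch of constructing the now-immutable `SpanContext` with `trace.NewSpanContext` and `trace.SpanContextConfig`; the field values shown are illustrative:

```
package sample

import "go.opentelemetry.io/otel/trace"

// spanContextFromWire builds a SpanContext for IDs received from
// another process. With SpanContext immutable, all values are set
// up front through the config struct.
func spanContextFromWire(traceID trace.TraceID, spanID trace.SpanID) trace.SpanContext {
	return trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    traceID,
		SpanID:     spanID,
		TraceFlags: trace.FlagsSampled,
		Remote:     true, // extracted from remote context data
	})
}
```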
- The Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673) +- The Jaeger exporter populates Jaeger's Span Process from the Resource. (#1673) +- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693) + +### Removed + +- Removed the `serviceName` parameter from the Zipkin exporter; the resource is used instead. (#1549) +- Removed `WithConfig` from the tracer provider to avoid overriding configuration. (#1633) +- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs. + These are now returned as a `SpanProcessor` interface from their respective constructors. (#1638) +- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660) +- Removed setting the status to `Error` while recording an error as a span event in `RecordError`. (#1663) +- Removed the `jaeger.WithProcess` configuration option. (#1673) +- Removed the `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693) + +### Fixed + +- Jaeger Exporter: Ensure mapping between OTel and Jaeger span data complies with the specification. (#1626) +- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655) +- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678) +- Do not set the span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681) +- Synchronization issues in the global trace delegate implementation. (#1686) +- Reduced excess memory usage by the global `TracerProvider`. (#1687) + +## [0.18.0] - 2021-03-03 + +### Added + +- Added `resource.Default()` for use with meter and tracer providers. (#1507) +- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits` (see the sketch below). (#1535) +- Added a `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544) +- Added `code` attributes to the `go.opentelemetry.io/otel/semconv` package. (#1558) +- Compatibility testing suite in the CI system for the following systems. (#1567) + | OS | Go Version | Architecture | + | ------- | ---------- | ------------ | + | Ubuntu | 1.15 | amd64 | + | Ubuntu | 1.14 | amd64 | + | Ubuntu | 1.15 | 386 | + | Ubuntu | 1.14 | 386 | + | macOS | 1.15 | amd64 | + | macOS | 1.14 | amd64 | + | Windows | 1.15 | amd64 | + | Windows | 1.14 | amd64 | + | Windows | 1.15 | 386 | + | Windows | 1.14 | 386 | + +### Changed + +- Replaced the `oteltest.SpanRecorder` interface with its existing implementation, + `StandardSpanRecorder`. (#1542) +- Default span limit values to 128. (#1535) +- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535) +- Renamed the `otel/label` package to `otel/attribute`. (#1541) +- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551) +- Parallelize the CI linting and testing. (#1567) +- Stagger timestamps in exact aggregator tests. (#1569)
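A hedged sketch of configuring the `SpanLimits` mentioned above on a tracer provider; the option and field names follow #1535/#1633, and later SDK versions renamed parts of this API:

```
package sample

import sdktrace "go.opentelemetry.io/otel/sdk/trace"

// limitedProvider caps per-span data using the SpanLimits fields
// introduced in #1535; 128 matches the new default values.
func limitedProvider() *sdktrace.TracerProvider {
	return sdktrace.NewTracerProvider(
		sdktrace.WithSpanLimits(sdktrace.SpanLimits{
			AttributeCountLimit:         128,
			EventCountLimit:             128,
			LinkCountLimit:              128,
			AttributePerEventCountLimit: 128,
			AttributePerLinkCountLimit:  128,
		}),
	)
}
```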
- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)` (see the sketch below). (#1621) +- Prevent end-users from implementing some interfaces. (#1575) + + ``` + "otel/exporters/otlp/otlphttp".Option + "otel/exporters/stdout".Option + "otel/oteltest".Option + "otel/trace".TracerOption + "otel/trace".SpanOption + "otel/trace".EventOption + "otel/trace".LifeCycleOption + "otel/trace".InstrumentationOption + "otel/sdk/resource".Option + "otel/sdk/trace".ParentBasedSamplerOption + "otel/sdk/trace".ReadOnlySpan + "otel/sdk/trace".ReadWriteSpan + ``` + +### Removed + +- Removed the attempt to resample spans upon changing the span name with `span.SetName()`. (#1545) +- The `test-benchmark` target is no longer a dependency of the `precommit` make target. (#1567) +- Removed the `test-386` make target. + This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567) + +### Fixed + +- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572) +- The Windows build of the Jaeger tests now compiles with OS-specific functions (#1576). (#1577) +- The sequential timing check of timestamps in `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` is now set up explicitly to be sequential (#1578). (#1579) +- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581) +- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570) + +## [0.17.0] - 2021-02-12 + +### Changed + +- Rename the project default branch from `master` to `main`. (#1505) +- Reverse the order in which `Resource` attributes are merged, per a change in the spec. (#1501) +- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528) +- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric. (#1528) +- Move metric-related public global APIs from otel to otel/metric/global. (#1528) + +### Fixed + +- Fixed otlpgrpc reconnection issue. +- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513) +- The otel-collector example now uses the default OTLP receiver port of the collector. + +## [0.16.0] - 2021-01-13 + +### Added + +- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360) +- The `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) +- Added documentation about the project's versioning policy. (#1388) +- Added `NewSplitDriver` for the OTLP exporter that allows sending traces and metrics to different endpoints. (#1418) +- Added a CodeQL workflow to GitHub Actions. (#1428) +- Added a Gosec workflow to GitHub Actions. (#1429) +- Add a new HTTP driver for the OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports binary protobuf payloads. (#1420) +- Add an OpenCensus exporter bridge. (#1444) + +### Changed + +- Rename `internal/testing` to `internal/internaltest`. (#1449) +- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360) +- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360) +- Improve span duration accuracy. (#1360)
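A small sketch of the corrected `WithBatchTimeout` usage noted under 0.18.0 above, using the modern `sdk/trace` names:

```
package sample

import (
	"time"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// batchedProvider wires an exporter through a batch span processor.
// WithBatchTimeout takes a time.Duration; a bare 5 would have meant
// 5 nanoseconds, which is why the examples were changed.
func batchedProvider(exp sdktrace.SpanExporter) *sdktrace.TracerProvider {
	bsp := sdktrace.NewBatchSpanProcessor(exp,
		sdktrace.WithBatchTimeout(5*time.Second))
	return sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
}
```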
- Migrated CI/CD from CircleCI to GitHub Actions. (#1382) +- Remove a duplicate checkout from the GitHub Actions workflow. (#1407) +- The Metric `array` aggregator was renamed `exact` to match its `aggregation.Kind`. (#1412) +- The Metric `exact` aggregator includes per-point timestamps. (#1412) +- The Metric stdout exporter uses the MinMaxSumCount aggregator for ValueRecorder instruments. (#1412) +- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) +- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) +- Unify the endpoint API related to the OTel exporter. (#1401) +- Optimize the metric histogram aggregator to re-use its slice of buckets. (#1435) +- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430) +- The Histogram aggregator accepts functional options and uses default boundaries if none are given. (#1434) +- `SamplingResult` is now passed a `TraceState` from the parent `SpanContext`. (#1432) +- Moved the gRPC driver for the OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420) +- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447) +- Metric Push and Pull Controller components are combined into a single "basic" Controller: + - `WithExporter()` and `Start()` to configure Push behavior + - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior + - `Start()` and `Stop()` accept Context. (#1378) +- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452) + +### Removed + +- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360) +- Remove Metric export functionality related to quantiles and summary data points: this is not specified. (#1412) +- Remove the DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released. (#1412) + +### Fixed + +- `BatchSpanProcessor.Shutdown()` will now shut down the underlying `export.SpanExporter`. (#1443) + +## [0.15.0] - 2020-12-10 + +### Added + +- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider` (see the sketch below). (#1363) + +### Changed + +- The Zipkin exporter now uses the Span status code when mapping a span's status to Zipkin tags. (#1328) +- The `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357) +- Move the OpenCensus example into the `example` directory. (#1359) +- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363) +- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4. (#1374) +- Bump `github.com/golangci/golangci-lint` in `/internal/tools`. (#1375) + +### Fixed + +- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381) + +## [0.14.0] - 2020-11-19 + +### Added + +- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254) +- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259) +- `SpanContextFromContext` returns the `SpanContext` from a context. (#1255) +- `TraceState` has been added to `SpanContext`. (#1340)
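A hedged sketch of plugging a custom `IDGenerator` into the `TracerProvider` via `WithIDGenerator` (#1363); `randIDGenerator` is a hypothetical implementation and the method signatures shown are the modern ones:

```
package sample

import (
	"context"
	"crypto/rand"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

// randIDGenerator is a hypothetical IDGenerator backed by crypto/rand.
type randIDGenerator struct{}

func (randIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
	var tid trace.TraceID
	var sid trace.SpanID
	_, _ = rand.Read(tid[:])
	_, _ = rand.Read(sid[:])
	return tid, sid
}

func (randIDGenerator) NewSpanID(ctx context.Context, _ trace.TraceID) trace.SpanID {
	var sid trace.SpanID
	_, _ = rand.Read(sid[:])
	return sid
}

func generatorProvider() *sdktrace.TracerProvider {
	return sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(randIDGenerator{}))
}
```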
- `DeploymentEnvironmentKey` added to the `go.opentelemetry.io/otel/semconv` package. (#1323) +- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305) +- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333) +- Add missing tests for `sdk/trace/attributes_map.go`. (#1337) + +### Changed + +- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307) + - `ID` has been renamed to `TraceID`. + - `IDFromHex` has been renamed to `TraceIDFromHex`. + - `EmptySpanContext` is removed. +- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229) +- OTLP Exporter updates: + - supports OTLP v0.6.0 (#1230, #1354) + - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296) +- The Sampler is now called on local child spans. (#1233) +- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240) +- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. + This matches the returned type and fixes misuse of the term metric. (#1240) +- Move the test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241) +- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252) +- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321) +- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316) +- Move the `Number` type (together with related functions) from the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316) +- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s (see the sketch below). (#1254) +- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254) +- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330) +- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330) +- Rename the correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267) +- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276) +- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and + `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235) +- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210) +- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310) +- Updated span collection limits for attribute, event and link counts to 1000. (#1318) +- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
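A brief sketch of the updated `AddEvent` and `RecordError` signatures (#1254), which drop the unused context parameter:

```
package sample

import (
	"errors"
	"time"

	"go.opentelemetry.io/otel/trace"
)

// annotate shows the post-#1254 call shapes: a required name or error
// value followed by variadic event options.
func annotate(span trace.Span) {
	span.AddEvent("cache.miss", trace.WithTimestamp(time.Now()))
	span.RecordError(errors.New("upstream unavailable"))
}
```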
### Removed + +- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243) +- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. + It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254) +- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. + `Tracer` and `Span` from the same module should be used in their place instead. (#1306) +- The `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350) +- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314) + +### Fixed + +- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244) +- The `go.opentelemetry.io/otel/api/global` package's global `TextMapPropagator` now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) +- Fix condition in `label.Any`. (#1299) +- Fix the global `TracerProvider` to pass options to its configured provider. (#1329) +- Fix the missing handler for the `ExactKind` aggregator in the OTLP metrics transformer. (#1309) + +## [0.13.0] - 2020-10-08 + +### Added + +- The OTLP Metric exporter supports Histogram aggregation. (#1209) +- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling, and also implements the `Stringer` interface. (#1214) +- A Baggage API to implement the OpenTelemetry specification. (#1217) +- Add a `Shutdown` method to the SDK's trace provider that shuts down its processors in the order they were registered. (#1227) + +### Changed + +- Set the default propagator to a no-op propagator. (#1184) +- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with the unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package (see the sketch below). (#1212) (#1325) +- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212) +- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. + They are now `Unset`, `Error`, and `Ok`. + They no longer track the gRPC codes. (#1214) +- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this project instead of the gRPC project. (#1214) +- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325) +- The `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264) + +### Fixed + +- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of the mutable data itself. (#1226) + +### Removed + +- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212) +- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. + The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
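A minimal sketch of the unified propagation API described above, composing the W3C trace context and baggage propagators with the modern package layout:

```
package sample

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

// installPropagators registers a composite TextMapPropagator, which
// replaces the removed propagation.Propagators interface.
func installPropagators() {
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	))
}
```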
- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216) +- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217) +- Remove the duplicate hostname key `HostHostNameKey` in the Resource semantic conventions. (#1219) +- Nested array/slice support has been removed. (#1226) + +## [0.12.0] - 2020-09-24 + +### Added + +- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108) +- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. + This addition was made to conform with our project option conventions. (#1155) +- Instrumentation library information was added to the Zipkin exporter. (#1119) +- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) +- More semantic conventions for k8s as resource attributes. (#1167) + +### Changed + +- Add a reconnecting UDP connection type to the Jaeger exporter. + This change adds a new optional implementation of the UDP connection interface used to detect changes to an agent's host DNS record. + It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063) +- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. + This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108) +- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`. + This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108) +- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109) +- Move `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. + This removal of the propagators is reflective of the OpenTelemetry specification for these propagators and cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118) +- Rename the Jaeger tags used for instrumentation library information to reflect changes in the OpenTelemetry specification. (#1119) +- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115) +- Move the `tools` package under `internal`. (#1141) +- Move the `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142) + The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. +- Rename the `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on the parent span (see the sketch below). (#1153) +- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155) +- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
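A short sketch combining the renamed samplers (#1115, #1153), using the modern `sdk/trace` package:

```
package sample

import sdktrace "go.opentelemetry.io/otel/sdk/trace"

// sampledProvider samples 25% of root traces; child spans follow
// their parent's sampling decision via ParentBased.
func sampledProvider() *sdktrace.TracerProvider {
	sampler := sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))
	return sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))
}
```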
- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to + recommend the use of `newConfig()` instead of `configure()`. (#1163) +- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163) +- Ensure exported interface types include parameter names and update the + Style Guide to reflect this styling rule. (#1172) +- Don't consider an unset environment variable for resource detection to be an error. (#1170) +- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and + `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`. +- ValueObserver instruments use the LastValue aggregator by default. (#1165) +- The OTLP Metric exporter supports LastValue aggregation. (#1165) +- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) +- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename the `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190) +- Rename the `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190) +- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Renamed `SamplingDecision` values to comply with an OpenTelemetry specification change. (#1192) +- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201) +- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195) +- Add a test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203) + +### Removed + +- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the + `go.opentelemetry.io/contrib/propagators/` module (see the sketch below). (#1191)
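A hedged sketch of using the relocated B3 propagator from the contrib module; the option and constant names are those of the current `contrib/propagators/b3` package:

```
package sample

import (
	"go.opentelemetry.io/contrib/propagators/b3"
	"go.opentelemetry.io/otel"
)

// installB3 registers the contrib B3 propagator globally, injecting
// the single-header encoding.
func installB3() {
	otel.SetTextMapPropagator(b3.New(b3.WithInjectEncoding(b3.B3SingleHeader)))
}
```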
- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey`, from the `go.opentelemetry.io/otel/semconv` package. (#1194) + +### Fixed + +- The Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171) +- Fix the missing shutdown processor in the otel-collector example. (#1186) +- Fix the missing shutdown processor in the basic and namedtracer examples. (#1197) + +## [0.11.0] - 2020-08-24 + +### Added + +- Support for exporting array-valued attributes via OTLP. (#992) +- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994) +- Support for filtering metric label sets. (#1047) +- A dimensionality-reducing metric Processor. (#1057) +- Integration tests for more OTel Collector Attribute types. (#1062) +- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) + +### Changed + +- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049) +- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049) +- Rename `api/testharness` to `api/apitest`. (#1049) +- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049) +- Change the Metric Processor to merge multiple observations. (#1024) +- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. + This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038) +- Renamed the `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016) +- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES`. (#1042) +- Replace `WithSyncer` with `WithBatcher` in examples. (#1044) +- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046) +- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` packages into the new `go.opentelemetry.io/otel/label` package. (#1060) +- Unify callback function naming. + Rename `*Callback` to `*Func`. (#1061) +- CI builds validate against the last two versions of Go, dropping 1.13 and adding 1.15. (#1064) +- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification-compliant `Exporter` interface. + This interface still supports the export of `SpanData`, but only as a slice. + Implementations are now also required to return any error from `ExportSpans` if one occurs, as well as to implement a `Shutdown` method for exporter clean-up (see the sketch below). (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error. + If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, only a `*Provider`. + This change is related to `NewBatchSpanProcessor` not returning an error, which was the only error this function would return. (#1078)
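A hedged sketch of a no-op implementation of the unified exporter interface described in #1078, using the modern `sdk/trace` names (`SpanExporter`, `ReadOnlySpan`) rather than the `SpanData` slice of this release:

```
package sample

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// nopExporter is a hypothetical exporter satisfying the unified
// interface: a batch-export method that returns errors, plus Shutdown.
type nopExporter struct{}

func (nopExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
	return nil // a real exporter would serialize and send the batch
}

func (nopExporter) Shutdown(ctx context.Context) error { return nil }

var _ sdktrace.SpanExporter = nopExporter{}
```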
### Removed + +- Duplicate, unused API sampler interface. (#999) + Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. +- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. + This move includes moving the `grpc` example to `go.opentelemetry.io/contrib` as well. (#1027) +- The `WithSpan` method of the `Tracer` interface. + The functionality this method provided was limited compared to what a user can provide themselves. + It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) +- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. + These were holdovers from an approach prior to the TracerProvider design. They were no longer used. (#1077) +- The `oterror` package. (#1026) +- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) + +### Fixed + +- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) +- Correct the instrumentation version tag in the Jaeger exporter. (#1037) +- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) +- Move internally generated protobuf code from `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050) +- The `otel-collector` example referenced outdated collector processors. (#1006) + +## [0.10.0] - 2020-07-29 + +This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. + +### Added + +- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. + These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944) +- Add a propagator option for gRPC instrumentation. (#986) +- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) + +### Changed + +- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. + This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) +- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. + This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) +- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962) +- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) + - `value.Bool` was replaced with `kv.BoolValue`. + - `value.Int64` was replaced with `kv.Int64Value`. + - `value.Uint64` was replaced with `kv.Uint64Value`. + - `value.Float64` was replaced with `kv.Float64Value`. + - `value.Int32` was replaced with `kv.Int32Value`. + - `value.Uint32` was replaced with `kv.Uint32Value`.
- `value.Float32` was replaced with `kv.Float32Value`. + - `value.String` was replaced with `kv.StringValue`. + - `value.Int` was replaced with `kv.IntValue`. + - `value.Uint` was replaced with `kv.UintValue`. + - `value.Array` was replaced with `kv.ArrayValue`. +- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) +- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979) +- Rename the `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) +- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985) +- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) + +### Removed + +- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel`, which it was synonymous with. (#970) + +### Fixed + +- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) +- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957) +- Use `global.Handle` for span export errors in the OTLP exporter. (#946) +- Correct Go language formatting in the README documentation. (#961) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) +- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) + +## [0.9.0] - 2020-07-20 + +### Added + +- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) +- A Detector to automatically detect resources from an environment variable. (#939) +- A GitHub action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) +- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`. + References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) + +### Changed + +- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) + +### Removed + +- Removed the dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) + +## [0.8.0] - 2020-07-09 + +### Added + +- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. + Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) +- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) +- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) +- Add the `peer.service` semantic attribute. (#898) +- Add database-specific semantic attributes. (#899) +- Add semantic conventions for `faas.coldstart` and `container.id`. (#909) +- Add HTTP content size semantic conventions. (#905) +- Include `http.request_content_length` in the HTTP request basic attributes. (#905) +- Add semantic conventions for operating system process resource attribute keys. (#919) +- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
### Changed + +- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) +- Use lowercase header names for B3 Multiple Headers. (#881) +- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`. + This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. + If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) +- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. + Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. + This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) +- Extend the semantic conventions for RPC. (#900) +- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920) + - `"api/standard".FaaSName` -> `FaaSNameKey` + - `"api/standard".FaaSID` -> `FaaSIDKey` + - `"api/standard".FaaSVersion` -> `FaaSVersionKey` + - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` + +### Removed + +- The `FlagsUnused` trace flag is removed. + The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882) +- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. + If B3 header keys are needed, [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) + +### Fixed + +- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) +- The B3 propagator now correctly supports sampling-only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) +- The B3 propagator now propagates the debug flag. + This removes the behavior of changing the debug flag into a set sampling bit. + Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882) +- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882) +- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883) +- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) +- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) +- Ensure the span status is not set to `Unknown` when no HTTP status code is provided, as it is assumed to be `200 OK`. (#908) +- Ensure `httptrace.clientTracer` closes the `http.headers` span. (#912) +- The Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) +- Add a test for the api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) +- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) +- Update the otel-collector example to use the v0.5.0 collector. (#915) +- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) +- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. + This is in accordance with OpenTelemetry semantic conventions. (#922) +- The Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) +- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) +- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) +- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) + +## [0.7.0] - 2020-06-26 + +This release implements the v0.5.0 version of the OpenTelemetry specification. + +### Added + +- The othttp instrumentation now includes default metrics. (#861) +- This CHANGELOG file to track all changes in the project going forward. +- Support for array type attributes. (#798) +- Apply transitive dependabot go.mod dependency updates as part of a new automatic GitHub workflow. (#844) +- Timestamps are now passed to exporters for each export. (#835) +- Add a new `Accumulation` type to the metric SDK to transport telemetry from `Accumulator`s to `Processor`s. + This replaces the prior use of the `Record` `struct` for this purpose. (#835) +- New dependabot integration to automate package upgrades. (#814) +- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument. + This instrumentation version is passed on to exporters. (#811) (#805) (#802) +- The OTLP exporter includes the instrumentation version in the telemetry it exports. (#811) +- Environment variables for the Jaeger exporter are supported. (#796) +- New `aggregation.Kind` in the export metric API. (#808) +- New example that uses OTLP and the collector. (#790) +- Handle errors in the span `SetName` during span initialization. (#791) +- Default service config to enable retries for retryable failed requests in the OTLP exporter, and an option to override this default. (#777) +- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) +- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`. + There is also functionality for the user to register their `Handler`, as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778) +- Options to specify propagators for the httptrace and grpctrace instrumentation. (#784) +- The required `application/json` header for the Zipkin exporter is included in all exports. (#774) +- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769) + +### Changed + +- Rename `Integrator` to `Processor` in the metric SDK. (#863) +- Rename `AggregationSelector` to `AggregatorSelector`. (#859) +- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) +- Rename the `simple` integrator to the `basic` integrator. (#857) +- Merge the otlp collector examples. (#841) +- Change the metric SDK to support cumulative, delta, and pass-through exporters directly. + With these changes, cumulative- and delta-specific exporters are able to request the correct kind of aggregation from the SDK. (#840) +- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) +- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. + Some of the `Aggregator`s used unnecessary locking, and that has been cleaned up. (#812) +- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) +- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) +- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808) +- Send the configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806) +- Update error handling to replace any one-off error handlers with the `global.Handle` function (see the sketch below). (#791) +- Rename the `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) +- Make the argument order of the Histogram and DDSketch `New()` functions consistent. (#781) + +### Removed + +- `Uint64NumberKind` and related functions from the API. (#864) +- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803) +- `SpanID` is no longer included in the parameters for the sampling decision, to match the OpenTelemetry specification. (#775) + +### Fixed + +- Upgrade the OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866) +- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) +- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) +- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2. (#848) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp. (#817) +- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools. (#828) +- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus. (#838) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger. (#829) +- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3. (#815) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin. (#823) +- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools. (#830) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus. (#822) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin. (#820) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger. (#831) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0. (#836) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger. (#837) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp. (#839) +- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger. (#843) +- Set the span status from the HTTP status code in the othttp instrumentation. (#832) +- Fixed a typo in a push controller comment. (#834) +- The `Aggregator` testing has been updated and cleaned. (#812) +- `metric.Number(0)` expressions are replaced by `0` where possible. (#812) +- Fixed the `global` `handler_test.go` test failure. (#804) +- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
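A hedged sketch of the global error-handler functionality (#778, #791) in its modern spelling; at the time it lived in the `oterror`/`global` packages, while today it is exposed from the `otel` package:

```
package sample

import (
	"log"

	"go.opentelemetry.io/otel"
)

// installErrorHandler registers a handler that receives errors from
// the SDK and exporters; code reports errors through the Handle
// convenience function (global.Handle at the time of this release).
func installErrorHandler() {
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel: %v", err)
	}))
}
```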
- Fixed the OTLP example's accidental early close of the exporter. (#807) +- Ensure the zipkin exporter reads and closes the response body. (#788) +- Update instrumentation to use `api/standard` keys instead of custom keys. (#782) +- Clean up the tools and RELEASING documentation. (#762) + +## [0.6.0] - 2020-05-21 + +### Added + +- Support for `Resource`s in the prometheus exporter. (#757) +- New pull controller. (#751) +- New `UpDownSumObserver` instrument. (#750) +- OpenTelemetry collector demo. (#711) +- New `SumObserver` instrument. (#747) +- New `UpDownCounter` instrument. (#745) +- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742) +- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731) + +### Changed + +- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) +- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758) +- Change the OTLP example to use the exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) +- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) +- The prometheus exporter now uses the new pull controller. (#751) +- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752) +- Support use of synchronous instruments in asynchronous callbacks. (#725) +- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) +- Rename the `Observer` instrument to `ValueObserver`. (#734) +- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) +- Replace the `Measure` instrument with the `ValueRecorder` instrument. (#732) +- Rename the correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727) + +### Fixed + +- Ensure gRPC `ClientStream` override methods do not panic in the grpctrace package. (#755) +- Disable parts of the `BatchSpanProcessor` test until a fix is found. (#743) +- Fix the `string` case in the `kv` `Infer` function. (#746) +- Fix a panic in the grpctrace client interceptors. (#740) +- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) +- Rewrite the span batch processor's queue batching logic. (#719) +- Remove the push controller's named Meter map. (#738) +- Fix the Histogram aggregator's initial state (fix #735). (#736) +- Ensure the golang alpine image is running `golang-1.14` for examples. (#733) +- Added a test for the grpctrace `UnaryInterceptorClient`. (#695) +- Rearrange the `api/metric` code layout. (#724) + +## [0.5.0] - 2020-05-13 + +### Added + +- Batch `Observer` callback support. (#717) +- Alias `api` types to the root package of the project. (#696) +- Create a basic `othttp.Transport` for simple client instrumentation. (#678) +- `SetAttribute(string, interface{})` to the trace API. (#674) +- A Jaeger exporter option that allows the user to specify a custom HTTP client. (#671) +- `Stringer` and `Infer` methods to `key`s. (#662) + +### Changed + +- Rename `NewKey` in the `kv` package to just `Key`. (#721) +- Move `core` and `key` to the `kv` package. (#720) +- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and the implementation simplified. (#709) +- Rename SDK `Batcher` to `Integrator` to match the draft OpenTelemetry SDK specification. (#710) +- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match the draft OpenTelemetry SDK specification. (#710)
- Rename the SDK `SDK` `struct` to `Accumulator` to match the draft OpenTelemetry SDK specification. (#710) +- Move `Number` from `core` to the `api/metric` package. (#706) +- Move `SpanContext` from `core` to the `trace` package. (#692) +- Change the traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681) + +### Fixed + +- Update tooling to run generators in all submodules. (#705) +- gRPC interceptor regexp to match methods without a service name. (#683) +- Use a `const` for padding 64-bit B3 trace IDs. (#701) +- Update the `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) +- Left-pad 64-bit B3 trace IDs with zero. (#698) +- Propagate at least the first W3C tracestate header. (#694) +- Remove the internal `StateLocker` implementation. (#688) +- Increase the instance size the CI system uses. (#690) +- Add a `key` benchmark and use reflection in `key.Infer()`. (#679) +- Fix the internal `global` test by using `global.Meter` with `RecordBatch()`. (#680) +- Reimplement the histogram using a mutex instead of `StateLocker`. (#669) +- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) +- Update the documentation to not include any references to `WithKeys`. (#672) +- Correct a misspelling. (#668) +- Fix clobbering of the span context if extraction fails. (#656) +- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) + +## [0.4.3] - 2020-04-24 + +### Added + +- `Dockerfile` and `docker-compose.yml` to run example code. (#635) +- A new `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) +- A new `api/label` package, providing a common label set implementation. (#651) +- Support for JSON marshaling of `Resources`. (#654) +- `TraceID` and `SpanID` implementations for the `Stringer` interface. (#642) +- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) +- A `WithSpanFormatter` option to the othttp plugin. (#617) +- Updated the README to include a section on compatible libraries and a reference to the contrib repository. (#612) +- The prometheus exporter now supports exporting histograms. (#601) +- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613) +- An `Iter` method to the `Resource` to return an `AttributeIterator`. (#613) +- An `Equal` method on the `Resource` to test the equivalence of resources. (#613) +- An iterable structure (`AttributeIterator`) for `Resource` attributes. + +### Changed + +- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644) +- Pass `Resources` through the metrics export pipeline. (#659) + +### Removed + +- The `WithKeys` option from the metric API. (#639) + +### Fixed + +- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) +- Correct the typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) +- Use type names for return values in the jaeger exporter. (#648) +- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) +- `Checkpoint` only after `Update`; keep records in the `sync.Map` longer. (#647) +- Do not cache `reflect.ValueOf()` in metric Labels. (#649) +- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) +- Add error wrapping to the prometheus exporter. (#631)
- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) +- Update the OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) +- Update the `Resource` internal representation to uniquely and reliably identify resources. (#613) +- Check the return value from `CheckpointSet.ForEach` in the prometheus exporter. (#622) +- Ensure spans created by the httptrace client tracer reflect the operation structure. (#618) +- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610) +- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611) + +## [0.4.2] - 2020-03-31 + +### Fixed + +- Fix `pre_release.sh` to update the version in `sdk/opentelemetry.go`. (#607) +- Fix the time conversion from internal to OTLP in the OTLP exporter. (#606) + +## [0.4.1] - 2020-03-31 + +### Fixed + +- Update `tag.sh` to create signed tags. (#604) + +## [0.4.0] - 2020-03-30 + +### Added + +- A new API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) +- A script to verify examples after a new release. (#579) + +### Removed + +- The dogstatsd exporter due to lack of support. + This additionally removes support for statsd. (#591) +- `LabelSet` from the metric API. + This is replaced by a `[]core.KeyValue` slice. (#595) +- `Labels` from the metric API's `Meter` interface. (#595) + +### Changed + +- The metric `export.Labels` became an interface, which the SDK implements; the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) +- Renamed `internal/metric.Meter` to `MeterImpl`. (#580) +- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) + +### Fixed + +- Corrected a missing return in the mock span. (#582) +- Update the License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) +- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) +- Update the pre-release script to be compatible between GNU- and BSD-based systems. (#592) +- Add a `RecordBatch` benchmark. (#594) +- Moved the span transforms of the OTLP exporter to the internal package. (#593) +- Build with both go-1.13 and go-1.14 in CircleCI to test for all supported versions of Go. (#569) +- Removed an unneeded allocation on empty labels in the OTLP exporter. (#597) +- Update `BatchedSpanProcessor` to process the queue until it is empty while respecting the max batch size. (#599) +- Update the project documentation's godoc.org links to pkg.go.dev. (#602) + +## [0.3.0] - 2020-03-21 + +This is the first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. + There is still a possibility of breaking changes. + +### Added + +- Add the `Observer` metric instrument. (#474) +- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) +- Simplified export setup pipeline for the jaeger exporter to match the other exporters. (#459) +- The zipkin trace exporter. (#495) +- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) +- Add a `StatusMessage` field to the trace `Span`. (#524) +- Context propagation in the OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+  Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument-specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+  The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor-specific Stackdriver exporter. It is now hosted on third-party vendor infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and its changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that does not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+  The checkpoint function is executed in a single thread, so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+  This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for the exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+  The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+  This resulted in all subsequent releases not becoming the default latest.
+  A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+  Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+  Consequently, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- `DefaultPropagator` function that returns `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify that the created span should be considered a root span. (#451)
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to the correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of the tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
+- Fix a possible nil-dereference crash. (#478)
+- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
+- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
+- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
+- Initialize `onError` based on `Config` in prometheus exporter. (#486)
+- Correct module name in prometheus exporter README. (#475)
+- Removed tracer name prefix from span names. (#430)
+- Fix `aggregator_test.go` import package comment. (#431)
+- Improved detail in stdout exporter. (#436)
+- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
+- Reorder the Makefile targets within the `precommit` target so files are generated and the code is built before linting, which yields much clearer compiler errors about syntax problems. (#442)
+- Reword function documentation in gRPC plugin. (#446)
+- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
+- Fix `metadataSupplier` in the jaeger exporter to overwrite an existing header instead of appending to it. (#441)
+- Upgraded to Go 1.13 in CI. (#465)
+- Correct opentelemetry.io URL in trace SDK documentation. (#464)
+- Refactored reference counting logic in SDK determination of stale records. (#468)
+- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
+
+## [0.2.1.1] - 2020-01-13
+
+### Fixed
+
+- Use a stateful batcher in the Prometheus exporter, fixing a regression introduced in #395. (#428)
+
+## [0.2.1] - 2020-01-08
+
+### Added
+
+- Global meter forwarding implementation.
+  This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
+- Global trace forwarding implementation.
+  This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
+- Standardize export pipeline creation in all exporters. (#395)
+- Testing, organization, and comments for 64-bit field alignment. (#418)
+- Script to tag all modules in the project. (#414)
+
+### Changed
+
+- Renamed `propagation` package to `propagators`. (#362)
+- Renamed `B3Propagator` propagator to `B3`. (#362)
+- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
+- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
+- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
+- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
+- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
+- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
+- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
+- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
+- `Number` now has a pointer receiver for its methods. (#375)
+- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
+- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
+- Renamed `Message` in Event to `Name` in the trace API. (#389)
+- Prometheus exporter no longer aggregates metrics; instead, it only exports them. (#385)
+- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
+- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
+- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
+- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
+- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
+
+### Fixed
+
+- Aggregator import path corrected. (#421)
+- Correct links in README. (#368)
+- The README was updated to match the latest code changes in its examples. (#374)
+- Don't capitalize error statements. (#375)
+- Fix ignored errors. (#375)
+- Fix ambiguous variable naming. (#375)
+- Removed unnecessary type casting. (#375)
+- Use named parameters. (#375)
+- Updated release schedule. (#378)
+- Correct http-stackdriver example module name. (#394)
+- Removed the `http.request` span in `httptrace` package. (#397)
+- Add comments in the metrics SDK. (#399)
+- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
+- Add documentation of compatible exporters in the README. (#405)
+- Typo fix. (#408)
+- Simplify span check logic in SDK tracer implementation. (#419)
+
+## [0.2.0] - 2019-12-03
+
+### Added
+
+- Unary gRPC tracing example. (#351)
+- Prometheus exporter. (#334)
+- Dogstatsd metrics exporter. (#326)
+
+### Changed
+
+- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
+- Rename `GetMeter` to `Meter`. (#357)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
+- Move `/global` package to `/api/global`. (#356)
+- Rename `GetTracer` to `Tracer`. (#347)
+
+### Removed
+
+- `SetAttribute` from the `Span` interface in the trace API. (#361)
+- `AddLink` from the `Span` interface in the trace API. (#349)
+- `Link` from the `Span` interface in the trace API. (#349)
+
+### Fixed
+
+- Exclude example directories from coverage report. (#365)
+- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
+- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359)
+- Run the race checker for all tests. (#354)
+- Redundant commands in the Makefile are removed. (#354)
+- Split the `generate` and `lint` targets of the Makefile. (#354)
+- Rename the `circle-ci` target to the more generic `ci` in the Makefile. (#354)
+- Add example Prometheus binary to gitignore. (#358)
+- Support negative numbers with the `MaxSumCount`. (#335)
+- Resolve race conditions in `push_test.go` identified in #339. (#340)
+- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
+- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
+  Previously it was testing `AlwaysSample` twice. (#325)
+- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
+- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
+- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
+  This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
+  This was corrected. (#333)
+
+## [0.1.2] - 2019-11-18
+
+### Fixed
+
+- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
+- Removed unnecessary unslicing of parameters that are already a slice. (#324)
+
+## [0.1.1] - 2019-11-18
+
+This release contains a Metrics SDK with a stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch.
+
+### Added
+
+- Metrics stdout export pipeline. (#265)
+- Array aggregation for raw measure metrics. (#282)
+- The `core.Value` type now has a `MarshalJSON` method. (#281)
+
+### Removed
+
+- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
+- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
+
+### Changed
+
+- Allocation in `LabelSet` construction to reduce GC overhead. (#318)
+- `trace.WithAttributes` to append values instead of replacing. (#315)
+- Use a formula for tolerance in sampling tests. (#298)
+- Move export types into trace and metric-specific sub-directories. (#289)
+- `SpanKind` back to being based on an `int` type. (#288)
+
+### Fixed
+
+- URL to OpenTelemetry website in README. (#323)
+- Name of othttp default tracer. (#321)
+- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
+- CI modules cache to correctly restore/save from/to the cache. (#316)
+- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
+- README now reflects the new code structure introduced with these changes. (#291)
+- Make the basic example work. (#279)
+
+## [0.1.0] - 2019-11-04
+
+This is the first release of the OpenTelemetry Go library.
+It contains the API and SDK for trace and meter.
+
+### Added
+
+- Initial OpenTelemetry trace and metric API prototypes.
+- Initial OpenTelemetry trace, metric, and export SDK packages.
+- A wireframe bridge to support compatibility with OpenTracing.
+- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
+- Exporters for Jaeger, Stackdriver, and stdout.
+- Propagators for binary, B3, and trace-context protocols.
+- Project information and guidelines in the form of a README and CONTRIBUTING.
+- Tools to build the project and a Makefile to automate the process.
+- Apache-2.0 license.
+- CircleCI build CI manifest files.
+- CODEOWNERS file to track owners of this project.
+ +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD +[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 +[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 +[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 +[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 +[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 +[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 +[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 +[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 +[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 +[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
+[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
+[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
+[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
+[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
+[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0
+[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0
+[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0
+[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0
+[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0
+[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
+[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
+[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
+[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0
+[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0
+[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0
+[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0
+[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0
+[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0
+[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3
+[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2
+[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1
+[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0
+[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0
+[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3
+[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2
+[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1
+[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1
+[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0
+[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
+[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
+[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+
+[Go 1.20]: https://go.dev/doc/go1.20
+[Go 1.19]: https://go.dev/doc/go1.19
+[Go 1.18]: https://go.dev/doc/go1.18
+
+[metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
+[metric SDK]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
new file mode 100644
index 00000000..62374000
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -0,0 +1,17 @@
+#####################################################
+#
+# List of approvers for this repository
+#
+#####################################################
+#
+# Learn about membership in OpenTelemetry community:
+# https://github.com/open-telemetry/community/blob/main/community-membership.md
+#
+#
+# Learn about CODEOWNERS file format:
+# https://help.github.com/en/articles/about-code-owners
+#
+
+* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+
+CODEOWNERS @MrAlias @MadVikingGod @pellared
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
new file mode 100644
index 00000000..a00dbca7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -0,0 +1,620 @@
+# Contributing to opentelemetry-go
+
+The Go special interest group (SIG) meets regularly. See the
+OpenTelemetry
+[community](https://github.com/open-telemetry/community#golang-sdk)
+repo for information on this and other language SIGs.
+
+See the [public meeting
+notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
+for a summary description of past meetings. To request edit access,
+join the meeting or get in touch on
+[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
+
+## Development
+
+You can view and edit the source code by cloning this repository:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go.git
+```
+
+Run `make test` to run the tests instead of `go test`.
+
+There are some generated files checked into the repo. To make sure
+that the generated files are up-to-date, run `make` (or `make
+precommit` - the `precommit` target is the default).
+
+The `precommit` target also fixes the formatting of the code and
+checks the status of the go module files.
+
+Additionally, there is a `codespell` target that checks for common
+typos in the code. It is not run by default, but you can run it
+manually with `make codespell`. It will set up a virtual environment
+in `venv` and install `codespell` there.
+
+If after running `make precommit` the output of `git status` contains
+`nothing to commit, working tree clean` then it means that everything
+is up-to-date and properly formatted.
+
+## Pull Requests
+
+### How to Send Pull Requests
+
+Everyone is welcome to contribute code to `opentelemetry-go` via
+GitHub pull requests (PRs).
+
+To create a new PR, fork the project in GitHub and clone the upstream
+repo:
+
+```sh
+go get -d go.opentelemetry.io/otel
+```
+
+(This may print some warning about "build constraints exclude all Go
+files"; just ignore it.)
+
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
+can alternatively use `git` directly with:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go
+```
+
+(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
+that name is a kind of redirector to GitHub that `go get` can
+understand, but `git` does not.)
+
+This would put the project in the `opentelemetry-go` directory in the
+current working directory.
+
+Enter the newly created directory and add your fork as a new remote:
+
+```sh
+git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+```
+
+Check out a new branch, make modifications, run linters and tests, update
+`CHANGELOG.md`, and push the branch to your fork:
+
+```sh
+git checkout -b <YOUR_BRANCH_NAME>
+# edit files
+# update changelog
+make precommit
+git add -p
+git commit
+git push <YOUR_FORK> <YOUR_BRANCH_NAME>
+```
+
+Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
+request ID to the entry you added to `CHANGELOG.md`.
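+
+For instance, a new `CHANGELOG.md` entry under the appropriate heading might
+look like the following sketch (the section and PR number here are
+placeholders, not a real change):
+
+```md
+### Fixed
+
+- Describe the user-visible change here. (#1234)
+```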
+
+### How to Receive Comments
+
+* If the PR is not ready for review, please put `[WIP]` in the title,
+  tag it as `work-in-progress`, or mark it as
+  [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
+* Make sure the CLA is signed and CI is passing.
+
+### How to Get PRs Merged
+
+A PR is considered **ready to merge** when:
+
+* It has received two qualified approvals[^1].
+
+  This is not enforced through automation, but needs to be validated by the
+  maintainer merging.
+  * The qualified approvals need to be from [Approver]s/[Maintainer]s
+    affiliated with different companies. Two qualified approvals from
+    [Approver]s or [Maintainer]s affiliated with the same company counts as a
+    single qualified approval.
+  * PRs introducing changes that have already been discussed and consensus
+    reached only need one qualified approval. The discussion and resolution
+    needs to be linked to the PR.
+  * Trivial changes[^2] only need one qualified approval.
+
+* All feedback has been addressed.
+  * All PR comments and suggestions are resolved.
+  * All GitHub Pull Request reviews with a status of "Request changes" have
+    been addressed. Another review by the objecting reviewer with a different
+    status can be submitted to clear the original review, or the review can be
+    dismissed by a [Maintainer] when the issues from the original review have
+    been addressed.
+  * Any comments or reviews that cannot be resolved between the PR author and
+    reviewers can be submitted to the community [Approver]s and [Maintainer]s
+    during the weekly SIG meeting. If consensus is reached among the
+    [Approver]s and [Maintainer]s during the SIG meeting the objections to the
+    PR may be dismissed or resolved or the PR closed by a [Maintainer].
+  * Any substantive changes to the PR require existing Approval reviews be
+    cleared unless the approver explicitly states that their approval persists
+    across changes. This includes changes resulting from other feedback.
+    [Approver]s and [Maintainer]s can help in clearing reviews and they should
+    be consulted if there are any questions.
+
+* The PR branch is up to date with the base branch it is merging into.
+  * To ensure this does not block the PR, it should be configured to allow
+    maintainers to update it.
+
+* It has been open for review for at least one working day. This gives people
+  reasonable time to review.
+  * Trivial changes[^2] do not have to wait for one day and may be merged with
+    a single [Maintainer]'s approval.
+
+* All required GitHub workflows have succeeded.
+* An urgent fix can be an exception as long as it has been actively
+  communicated among [Maintainer]s.
+
+Any [Maintainer] can merge the PR once the above criteria have been met.
+
+[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
+  status from an OpenTelemetry Go [Approver] or [Maintainer].
+[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
+  changes, documentation corrections or updates, dependency updates, etc.
+
+## Design Choices
+
+As with other OpenTelemetry clients, opentelemetry-go follows the
+[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
+
+It's especially valuable to read through the [library
+guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
+
+### Focus on Capabilities, Not Structure Compliance
+
+OpenTelemetry is an evolving specification, one where the desires and
+use cases are clear, but the methods to satisfy those use cases are
+not.
+
+As such, contributions should provide functionality and behavior that
+conforms to the specification, but the interface and structure are
+flexible.
+
+It is preferable to have contributions follow the idioms of the
+language rather than conform to specific API names or argument
+patterns in the spec.
+
+For a deeper discussion, see
+[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+
+## Documentation
+
+Each (non-internal, non-test) package must be documented using
+[Go Doc Comments](https://go.dev/doc/comment),
+preferably in a `doc.go` file.
+
+Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
+instead of putting code snippets in Go doc comments.
+In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
+
+You can install and run a "local Go Doc site" in the following way:
+
+```sh
+go install golang.org/x/pkgsite/cmd/pkgsite@latest
+pkgsite
+```
+
+[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
+is an example of a very well-documented package.
+
+## Style Guide
+
+One of the primary goals of this project is that it is actually used by
+developers. With this goal in mind, the project strives to build
+user-friendly and idiomatic Go code adhering to the Go community's best
+practices.
+
+For a non-comprehensive but foundational overview of these best practices,
+the [Effective Go](https://golang.org/doc/effective_go.html) documentation
+is an excellent starting place.
+
+As a convenience for developers building this project, the `make precommit`
+target will format, lint, validate, and in some cases fix the changes you
+plan to submit. This check will need to pass for your changes to be able to
+be merged.
+
+In addition to idiomatic Go, the project has adopted certain standards for
+implementations of common patterns. These standards should be followed as a
+default, and if they are not followed, documentation explaining the reasons
+why needs to be included.
+
+### Configuration
+
+When creating an instantiation function for a complex `type T struct`, it is
+useful to allow a variable number of options to be applied. However, the
+strong type system of Go restricts the function design options. There are a
+few ways to solve this problem, but we have landed on the following design.
+
+#### `config`
+
+Configuration should be held in a `struct` named `config`, or prefixed with
+the specific type name this configuration applies to if there are multiple
+`config`s in the package. This type must contain configuration options.
+
+```go
+// config contains configuration options for a thing.
+type config struct {
+	// options ...
+}
+```
+
+In general the `config` type will not need to be used externally to the
+package and should be unexported. If, however, it is expected that the user
+will likely want to build custom options for the configuration, the `config`
+should be exported. Please include in the documentation for the `config`
+how the user can extend the configuration.
+
+It is important that internal `config`s are not shared across package
+boundaries, meaning a `config` from one package should not be directly used
+by another. The one exception is the API packages. The configs from the base
+API, e.g.
+`go.opentelemetry.io/otel/trace.TracerConfig` and
+`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
+by the SDK; therefore, it is expected that these are exported.
+
+When a config is exported we want to maintain forward and backward
+compatibility. To achieve this, no fields should be exported; they should
+instead be accessed by methods.
+
+Optionally, it is common to include a `newConfig` function (with the same
+naming scheme). This function wraps any default settings and loops over
+all options to create a configured `config`.
+
+```go
+// newConfig returns an appropriately configured config.
+func newConfig(options ...Option) config {
+	// Set default values for config.
+	config := config{/* […] */}
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	// Perform any validation here.
+	return config
+}
+```
+
+If validation of the `config` options is also performed, this function can
+additionally return an error that is expected to be handled by the
+instantiation function or propagated to the user.
+
+Given the design goal of not having the user need to work with the `config`,
+the `newConfig` function should also be unexported.
+
+#### `Option`
+
+To set the value of the options a `config` contains, a corresponding
+`Option` interface type should be used.
+
+```go
+type Option interface {
+	apply(config) config
+}
+```
+
+Having `apply` unexported makes sure that it will not be used externally.
+Moreover, the interface becomes sealed so the user cannot easily implement
+the interface on their own.
+
+The `apply` method should return a modified version of the passed config.
+This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
+
+The name of the interface should be prefixed in the same way the
+corresponding `config` is (if at all).
+
+#### Options
+
+All user-configurable options for a `config` must have a related unexported
+implementation of the `Option` interface and an exported configuration
+function that wraps this implementation.
+
+The wrapping function name should be prefixed with `With*` (or in the
+special case of a boolean option `Without*`) and should have the following
+function signature.
+
+```go
+func With*(…) Option { … }
+```
+
+##### `bool` Options
+
+```go
+type defaultFalseOption bool
+
+func (o defaultFalseOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithOption sets a T to have an option included.
+func WithOption() Option {
+	return defaultFalseOption(true)
+}
+```
+
+```go
+type defaultTrueOption bool
+
+func (o defaultTrueOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithoutOption sets a T to have the Bool option excluded.
+func WithoutOption() Option {
+	return defaultTrueOption(false)
+}
+```
+
+##### Declared Type Options
+
+```go
+type myTypeOption struct {
+	MyType MyType
+}
+
+func (o myTypeOption) apply(c config) config {
+	c.MyType = o.MyType
+	return c
+}
+
+// WithMyType sets T to include MyType.
+func WithMyType(t MyType) Option {
+	return myTypeOption{t}
+}
+```
+
+##### Functional Options
+
+```go
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(c config) config {
+	return fn(c)
+}
+
+// WithMyType sets t as MyType.
+func WithMyType(t MyType) Option {
+	return optionFunc(func(c config) config {
+		c.MyType = t
+		return c
+	})
+}
+```
+
+#### Instantiation
+
+Use this configuration pattern to configure instantiation with a `NewT`
+function.
+
+```go
+func NewT(options ...Option) T {…}
+```
+
+Any required parameters can be declared before the variadic `options`.
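+
+As a minimal sketch, reusing the hypothetical names from the examples above
+(the `name` parameter and `myType` field are illustrative, not part of any
+real API):
+
+```go
+// NewT constructs a T from a required name and any number of options.
+func NewT(name string, options ...Option) T {
+	cfg := newConfig(options...)
+	return T{name: name, myType: cfg.MyType}
+}
+```
+
+A caller would then write, for example, `t := NewT("example", WithMyType(mt))`.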
+
+#### Dealing with Overlap
+
+Sometimes there are multiple complex `struct`s that share common
+configuration and also have distinct configuration. To avoid repeated
+portions of `config`s, a common `config` can be used with the union of
+options being handled with the `Option` interface.
+
+For example:
+
+```go
+// config holds options for all animals.
+type config struct {
+	Weight      float64
+	Color       string
+	MaxAltitude float64
+}
+
+// DogOption applies Dog-specific options.
+type DogOption interface {
+	applyDog(config) config
+}
+
+// BirdOption applies Bird-specific options.
+type BirdOption interface {
+	applyBird(config) config
+}
+
+// Option applies options for all animals.
+type Option interface {
+	BirdOption
+	DogOption
+}
+
+type weightOption float64
+
+func (o weightOption) applyDog(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func (o weightOption) applyBird(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func WithWeight(w float64) Option { return weightOption(w) }
+
+type furColorOption string
+
+func (o furColorOption) applyDog(c config) config {
+	c.Color = string(o)
+	return c
+}
+
+func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+type maxAltitudeOption float64
+
+func (o maxAltitudeOption) applyBird(c config) config {
+	c.MaxAltitude = float64(o)
+	return c
+}
+
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func NewDog(name string, o ...DogOption) Dog {…}
+func NewBird(name string, o ...BirdOption) Bird {…}
+```
+
+### Interfaces
+
+To allow other developers to better comprehend the code, it is important
+to ensure it is sufficiently documented. One simple measure that contributes
+to this aim is self-documentation by naming method parameters. Therefore,
+where appropriate, methods of every exported interface type should have
+their parameters appropriately named.
+
+#### Interface Stability
+
+All exported stable interfaces that include the following warning in their
+documentation are allowed to be extended with additional methods.
+
+> Warning: methods may be added to this interface in minor releases.
+
+These interfaces are defined by the OpenTelemetry specification and will be
+updated as the specification evolves.
+
+Otherwise, stable interfaces MUST NOT be modified.
+
+#### How to Change Specification Interfaces
+
+When an API change must be made, we will update the SDK with the new method one
+release before the API change. This will allow an SDK from one version before
+the API change to work seamlessly with the new API.
+
+If an incompatible version of the SDK is used with the new API, the application
+will fail to compile.
+
+#### How Not to Change Specification Interfaces
+
+We have explored using a v2 of the API to change interfaces and found that there
+was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
+Problems happened with libraries that upgraded to v2 when an application did
+not; such a mismatch would not produce any telemetry.
+
+More detail on the approaches considered and their limitations can be found in
+the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
+issue.
+
+#### How to Change Other Interfaces
+
+If new functionality is needed for an interface that cannot be changed, it MUST
+be added by including an additional interface. That added interface can be a
+simple interface for the specific functionality that you want to add or it can
+be a super-set of the original interface. For example, if you wanted to add a
+`Close` method to the `Exporter` interface:
+
+```go
+type Exporter interface {
+	Export()
+}
+```
+
+A new interface, `Closer`, can be added:
+
+```go
+type Closer interface {
+	Close()
+}
+```
+
+Code that is passed the `Exporter` interface can now check to see if the passed
+value also satisfies the new interface. E.g.
+
+```go
+func caller(e Exporter) {
+	/* ... */
+	if c, ok := e.(Closer); ok {
+		c.Close()
+	}
+	/* ... */
+}
+```
+
+Alternatively, a new type that is the super-set of an `Exporter` can be created.
+
+```go
+type ClosingExporter struct {
+	Exporter
+}
+
+// Close releases any resources held by the embedded Exporter.
+func (e ClosingExporter) Close() { /* ... */ }
+```
+
+This new type can be used similarly to the simple interface above in that a
+passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type
+and the `Close` method called.
+
+This super-set approach can be useful if there is explicit behavior that needs
+to be coupled with the original type and passed as a unified type to a new
+function, but, because of this coupling, it also limits the applicability of
+the added functionality. If there exist other interfaces where this
+functionality should be added, each one will need its own super-set
+interface, duplicating the pattern. For this reason, the simple targeted
+interface that defines the specific functionality should be preferred.
+
+### Testing
+
+The tests should never leak goroutines.
+
+Use the term `ConcurrentSafe` in the test name when it aims to verify the
+absence of race conditions.
+
+### Internal packages
+
+The use of internal packages should be scoped to a single module. A sub-module
+should never import from a parent internal package. Such an import creates a
+coupling between the two modules: a user can upgrade the parent without the
+child, and if the internal package API has changed, the upgrade will fail[^3].
+
+There are two known exceptions to this rule:
+
+- `go.opentelemetry.io/otel/internal/global`
+  - This package manages global state for all of opentelemetry-go. It needs to
+    be a single package in order to ensure the uniqueness of the global state.
+- `go.opentelemetry.io/otel/internal/baggage`
+  - This package provides values in a `context.Context` that need to be
+    recognized by `go.opentelemetry.io/otel/baggage` and
+    `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
+
+If you have duplicate code in multiple modules, make that code into a Go
+template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
+to render the templates in the desired locations. See [#4404] for an example of
+this.
+ +[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548 + +## Approvers and Maintainers + +### Approvers + +- [Evan Torrie](https://github.com/evantorrie), Verizon Media +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics +- [David Ashpole](https://github.com/dashpole), Google +- [Chester Cheung](https://github.com/hanyuancheung), Tencent +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS + +### Maintainers + +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Robert Pająk](https://github.com/pellared), Splunk +- [Tyler Yahn](https://github.com/MrAlias), Splunk + +### Emeritus + +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Josh MacDonald](https://github.com/jmacd), LightStep + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). + +[Approver]: #approvers +[Maintainer]: #maintainers +[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl +[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 00000000..5c311706 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,291 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . -name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default +ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +DBOTCONF = $(TOOLS)/dbotconf +$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOJQ = $(TOOLS)/gojq +$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + +GOTMPL = $(TOOLS)/gotmpl +$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl + +GORELEASE = $(TOOLS)/gorelease +$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease + +.PHONY: tools +tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) + +# Virtualized python tools via docker + +# The directory where the virtual environment is created. +VENVDIR := venv + +# The directory where the python tools are installed. +PYTOOLS := $(VENVDIR)/bin + +# The pip executable in the virtual environment. +PIP := $(PYTOOLS)/pip + +# The directory in the docker image where the current directory is mounted. +WORKDIR := /workdir + +# The python image to use for the virtual environment. +PYTHONIMAGE := python:3.11.3-slim-bullseye + +# Run the python image with the current directory mounted. +DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) + +# Create a virtual environment for Python tools. +$(PYTOOLS): +# The `--upgrade` flag is needed to ensure that the virtual environment is +# created with the latest pip version. + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + +# Install python packages into the virtual environment. +$(PYTOOLS)/%: | $(PYTOOLS) + @$(DOCKERPY) $(PIP) install -r requirements.txt + +CODESPELL = $(PYTOOLS)/codespell +$(CODESPELL): PACKAGE=codespell + +# Generate + +.PHONY: generate +generate: go-generate vanity-import-fix + +.PHONY: go-generate +go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) +go-generate/%: DIR=$* +go-generate/%: | $(STRINGER) $(GOTMPL) + @echo "$(GO) generate $(DIR)/..." 
\ + && cd $(DIR) \ + && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... + +.PHONY: vanity-import-fix +vanity-import-fix: | $(PORTO) + @$(PORTO) --include-internal -w . + +# Generate go.work file for local development. +.PHONY: go-work +go-work: | $(CROSSLINK) + $(CROSSLINK) work --root=$(shell pwd) + +# Build + +.PHONY: build + +build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) +build/%: DIR=$* +build/%: + @echo "$(GO) build $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) build ./... + +build-tests/%: DIR=$* +build-tests/%: + @echo "$(GO) build tests $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null + +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race +.PHONY: $(TEST_TARGETS) test +test-default test-race: ARGS=-race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v -race +$(TEST_TARGETS): test +test: $(OTEL_GO_MOD_DIRS:%=test/%) +test/%: DIR=$* +test/%: + @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) + +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: | $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | grep -v 'semconv/v.*' \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt + +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: | $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: | $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: | crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy -compat=1.20 + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint + +.PHONY: vanity-import-check +vanity-import-check: | $(PORTO) + @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) + +.PHONY: misspell +misspell: | $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: codespell +codespell: | $(CODESPELL) + @$(DOCKERPY) $(CODESPELL) + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! 
-path './.git/*' ) ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +DEPENDABOT_CONFIG = .github/dependabot.yml +.PHONY: dependabot-check +dependabot-check: | $(DBOTCONF) + @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) + +.PHONY: dependabot-generate +dependabot-generate: | $(DBOTCONF) + @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) + [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + +.PHONY: gorelease +gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) +gorelease/%: DIR=$* +gorelease/%:| $(GORELEASE) + @echo "gorelease in $(DIR):" \ + && cd $(DIR) \ + && $(GORELEASE) \ + || echo "" + +.PHONY: prerelease +prerelease: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + +COMMIT ?= "HEAD" +.PHONY: add-tags +add-tags: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md new file mode 100644 index 00000000..634326ef --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -0,0 +1,111 @@ +# OpenTelemetry-Go + +[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) +[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) + +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). 
+It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. + +## Project Status + +| Signal | Status | Project | +|---------|------------|-----------------------| +| Traces | Stable | N/A | +| Metrics | Mixed [1] | [Go: Metric SDK (GA)] | +| Logs | Frozen [2] | N/A | + +[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34 + +- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta. +- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK. + No Logs Pull Requests are currently being accepted. + +Progress and status specific to this repository is tracked in our +[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) +and +[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). + +Project versioning information and stability guarantees can be found in the +[versioning documentation](VERSIONING.md). + +### Compatibility + +OpenTelemetry-Go ensures compatibility with the current supported versions of +the [Go language](https://golang.org/doc/devel/release#policy): + +> Each major Go release is supported until there are two newer major releases. +> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. + +For versions of Go that are no longer supported upstream, opentelemetry-go will +stop ensuring compatibility with these versions in the following manner: + +- A minor release of opentelemetry-go will be made to add support for the new + supported release of Go. +- The following minor release of opentelemetry-go will remove compatibility + testing for the oldest (now archived upstream) version of Go. This, and + future, releases of opentelemetry-go may include features only supported by + the currently supported versions of Go. + +Currently, this project supports the following environments. + +| OS | Go Version | Architecture | +|---------|------------|--------------| +| Ubuntu | 1.21 | amd64 | +| Ubuntu | 1.20 | amd64 | +| Ubuntu | 1.21 | 386 | +| Ubuntu | 1.20 | 386 | +| MacOS | 1.21 | amd64 | +| MacOS | 1.20 | amd64 | +| Windows | 1.21 | amd64 | +| Windows | 1.20 | amd64 | +| Windows | 1.21 | 386 | +| Windows | 1.20 | 386 | + +While this project should work for other systems, no compatibility guarantees +are made for those systems currently. + +## Getting Started + +You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/). + +OpenTelemetry's goal is to provide a single set of APIs to capture distributed +traces and metrics from your application and send them to an observability +platform. This project allows you to do just that for applications written in +Go. There are two steps to this process: instrument your application, and +configure an exporter. + +### Instrumentation + +To start capturing distributed traces and metric events from your application +it first needs to be instrumented. The easiest way to do this is by using an +instrumentation library for your code. Be sure to check out [the officially +supported instrumentation +libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). 
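+
+For a first look at what instrumentation involves, here is a minimal sketch
+using the `otel` and `attribute` packages (the tracer name, span name, and
+attribute key are illustrative placeholders):
+
+```go
+package app
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+)
+
+func handleRequest(ctx context.Context) {
+	// Obtain a Tracer from the globally registered TracerProvider.
+	tracer := otel.Tracer("example.com/hypothetical/app")
+
+	// Start a span for this unit of work, and always end it.
+	ctx, span := tracer.Start(ctx, "handleRequest")
+	defer span.End()
+
+	// Annotate the span with key-value attributes.
+	span.SetAttributes(attribute.String("request.kind", "demo"))
+
+	doWork(ctx) // pass ctx down so callees can create child spans
+}
+
+func doWork(context.Context) {}
+```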
+
+If you need to extend the telemetry an instrumentation library provides, or
+want to build your own instrumentation for your application directly, you will
+need to use the
+[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
+package. The included [examples](./example/) are a good way to see some
+practical uses of this process.
+
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an
+export pipeline to send that telemetry to an observability platform.
+
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
+
+| Exporter                              | Metrics | Traces |
+|---------------------------------------|:-------:|:------:|
+| [OTLP](./exporters/otlp/)             |    ✓    |   ✓    |
+| [Prometheus](./exporters/prometheus/) |    ✓    |        |
+| [stdout](./exporters/stdout/)         |    ✓    |   ✓    |
+| [Zipkin](./exporters/zipkin/)         |         |   ✓    |
+
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 00000000..82ce3ee4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,139 @@
+# Release Process
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
+3. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.21.0" # Change to the release version you are generating.
+export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
+docker pull otel/semconvgen:latest
+make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
+Update go.mod for submodules to depend on the new release, which will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+   `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+   ```
+   make prerelease MODSET=<module set>
+   ```
+
+2. Verify the changes.
+
+   ```
+   git diff ...prerelease_<module set>_<new tag>
+   ```
+
+   This should have changed the version for all modules to be `<new tag>`.
+   If these changes look correct, merge them into your pre-release branch:
+
+   ```
+   git merge prerelease_<module set>_<new tag>
+   ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+     To verify this, you can look directly at the commits since the `<last tag>`.
+
+     ```
+     git --no-pager log --pretty=oneline "<last tag>..HEAD"
+     ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+   Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged, it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies and tough-to-work-around problems](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+   using the `<commit hash>` of the commit on the main branch for the merged Pull Request.
+
+   ```
+   make add-tags MODSET=<module set> COMMIT=<commit hash>
+   ```
+
+   It should only be necessary to provide an explicit `COMMIT` value if the
+   current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+   Make sure you push all sub-modules as well.
+
+   ```
+   git push upstream <new tag>
+   git push upstream <submodules-path/new tag>
+   ...
+   ```
+
+## Release
+
+Finally, create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Verify Examples
+
+After releasing, verify that examples build outside of the repository.
+
+```
+./verify_examples.sh
+```
+
+The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
+This ensures they build with the published release, not the local copy.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/
+[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 00000000..412f1e36
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
This policy +is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver + 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. + * New methods may be added to exported API interfaces. All exported + interfaces that fall within this exception will include the following + paragraph in their public documentation. + + > Warning: methods may be added to this interface in minor releases. + + * If a module is version `v2` or higher, the major version of the module + must be included as a `/vN` at the end of the module paths used in + `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require + go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path + (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the + paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + `@v2.0.1` in that example. One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * Modules will be used to encapsulate signals and components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API will be versioned + with a major version greater than `v0`. + * The decision to make a module stable will be made on a case-by-case + basis by the maintainers of this project. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * All stable modules that use the same major version number will use the + same entire version number. + * Stable modules may be released with an incremented minor or patch + version even though that module has not been changed, but rather so + that it will remain at the same version as other stable modules that + did undergo change. + * When an experimental module becomes stable a new stable module version + will be released and will include this now stable module. The new + stable module version will be an increment of the minor version number + and will be applied to all existing stable modules as well as the newly + stable module being released. +* Versioning of the associated [contrib + repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of + this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). 
+      * If a module is version `v2` or higher, the
+        major version of the module must be included as a `/vN` at the end of the
+        module paths used in `go.mod` files (e.g., `module
+        go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+        go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+        package import path (e.g., `import
+        "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+        the paths used in `go get` commands (e.g., `go get
+        go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
+        is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+        it is that the module name now includes the `/v2`, so include `/v2`
+        whenever you are using the module name). A short import-path
+        illustration follows this list.
+      * If a module is version `v0` or `v1`, do not include the major version
+        in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+  * Experimental modules still under active development will be versioned at
+    `v0` to imply the stability guarantee defined by
+    [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+    > Major version zero (0.y.z) is for initial development. Anything MAY
+    > change at any time. The public API SHOULD NOT be considered stable.
+
+  * Mature modules for which we guarantee a stable public API and telemetry will
+    be versioned with a major version greater than `v0`.
+  * Experimental modules will start their versioning at `v0.0.0` and will
+    increment their minor version when backwards incompatible changes are
+    released and increment their patch version when backwards compatible
+    changes are released.
+  * Stable contrib modules cannot depend on experimental modules from this
+    project.
+  * All stable contrib modules with the same major version as this project
+    will use the same entire version as this project.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module's code has not been changed. Instead,
+      the only change that will have been included is to have updated that
+      module's dependency on this project's stable APIs.
+    * When an experimental module in contrib becomes stable, a new stable
+      module version will be released and will include this now stable
+      module. The new stable module version will be an increment of the minor
+      version number and will be applied to all existing stable contrib
+      modules, this project's modules, and the newly stable module being
+      released.
+  * Contrib modules will be kept up to date with this project's releases.
+    * Due to the dependency contrib modules will implicitly have on this
+      project's modules, the release of stable contrib modules to match the
+      released version number will be staggered after this project's release.
+      There is no explicit time guarantee for how long after this project's
+      release the contrib release will be. Effort should be made to keep them
+      as close in time as possible.
+    * No additional stable release in this project can be made until the
+      contrib repository has a matching stable release.
+    * No release can be made in the contrib repository after this project's
+      stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
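+
+For example, any consumer of a `v2+` module writes the major version into both
+`go.mod` and import paths. Using Podman's Go bindings (a `v4` module) as an
+illustration, with `require github.com/containers/podman/v4 v4.9.3` in
+`go.mod`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/containers/podman/v4/version" // note the /v4 in the path
+)
+
+func main() {
+	// version.Version is the library's own semantic version.
+	fmt.Println("podman library version:", version.Version)
+}
+```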
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy, the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+These modules have been developed to a point where the `otel/trace`,
+`otel/baggage`, and `otel/sdk/trace` modules should be considered for a
+stable release. The `otel/metric` and `otel/sdk/metric` modules are still
+under active development and the `otel` module depends on both `otel/trace`
+and `otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done, the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates (a small check of this precedence appears at the end of this
+document).
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable
+release. They are then released as version `v1.1.0` (the minor version is
+incremented to indicate the addition of a new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
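+
+For reference, the release-candidate precedence mentioned above can be checked
+with `golang.org/x/mod/semver` (an illustrative, assumed dependency; it is not
+required by these modules):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"golang.org/x/mod/semver" // assumed helper, used only for illustration
+)
+
+func main() {
+	// Compare reports -1 because v1.0.0-RC2 (a pre-release) sorts before
+	// the final v1.0.0 under semver precedence rules.
+	fmt.Println(semver.Compare("v1.0.0-RC2", "v1.0.0")) // -1
+}
+```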
diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
similarity index 68%
rename from vendor/github.com/go-openapi/swag/post_go18.go
rename to vendor/go.opentelemetry.io/otel/attribute/doc.go
index f5228b82..dafe7424 100644
--- a/vendor/github.com/go-openapi/swag/post_go18.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -1,10 +1,10 @@
-// Copyright 2015 go-swagger maintainers
+// Copyright The OpenTelemetry Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-//    http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,13 +12,5 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build go1.8
-// +build go1.8
-
-package swag
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
-	return url.PathUnescape(path)
-}
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 00000000..fe2bc576
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,146 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"bytes"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	// Encoder is a mechanism for serializing an attribute set into a specific
+	// string representation that supports caching, to avoid repeated
+	// serialization. An example could be an exporter encoding the attribute
+	// set into a wire representation.
+	Encoder interface {
+		// Encode returns the serialized encoding of the attribute set using
+		// its Iterator. This result may be cached by an attribute.Set.
+		Encode(iterator Iterator) string
+
+		// ID returns a value that is unique for each class of attribute
+		// encoder. Attribute encoders allocate these using `NewEncoderID`.
+		ID() EncoderID
+	}
+
+	// EncoderID is used to identify distinct Encoder
+	// implementations, for caching encoded results.
+	EncoderID struct {
+		value uint64
+	}
+
+	// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+	// allocations used in encoding attributes. This implementation encodes a
+	// comma-separated list of key=value, with '\'-escaping of '=', ',', and
+	// '\'.
+	defaultAttrEncoder struct {
+		// pool is a pool of attribute set builders. The buffers in this pool
The buffers in this pool + // grow to a size that most attribute encodings will not allocate new + // memory. + pool sync.Pool // *bytes.Buffer + } +) + +// escapeChar is used to ensure uniqueness of the attribute encoding where +// keys or values contain either '=' or ','. Since there is no parser needed +// for this encoding and its only requirement is to be unique, this choice is +// arbitrary. Users will see these in some exporters (e.g., stdout), so the +// backslash ('\') is used as a conventional choice. +const escapeChar = '\\' + +var ( + _ Encoder = &defaultAttrEncoder{} + + // encoderIDCounter is for generating IDs for other attribute encoders. + encoderIDCounter uint64 + + defaultEncoderOnce sync.Once + defaultEncoderID = NewEncoderID() + defaultEncoderInstance *defaultAttrEncoder +) + +// NewEncoderID returns a unique attribute encoder ID. It should be called +// once per each type of attribute encoder. Preferably in init() or in var +// definition. +func NewEncoderID() EncoderID { + return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} +} + +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. +// +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. +func DefaultEncoder() Encoder { + defaultEncoderOnce.Do(func() { + defaultEncoderInstance = &defaultAttrEncoder{ + pool: sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, + }, + } + }) + return defaultEncoderInstance +} + +// Encode is a part of an implementation of the AttributeEncoder interface. +func (d *defaultAttrEncoder) Encode(iter Iterator) string { + buf := d.pool.Get().(*bytes.Buffer) + defer d.pool.Put(buf) + buf.Reset() + + for iter.Next() { + i, keyValue := iter.IndexedAttribute() + if i > 0 { + _, _ = buf.WriteRune(',') + } + copyAndEscape(buf, string(keyValue.Key)) + + _, _ = buf.WriteRune('=') + + if keyValue.Value.Type() == STRING { + copyAndEscape(buf, keyValue.Value.AsString()) + } else { + _, _ = buf.WriteString(keyValue.Value.Emit()) + } + } + return buf.String() +} + +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { + return defaultEncoderID +} + +// copyAndEscape escapes `=`, `,` and its own escape character (`\`), +// making the default encoding unique. +func copyAndEscape(buf *bytes.Buffer, val string) { + for _, ch := range val { + switch ch { + case '=', ',', escapeChar: + _, _ = buf.WriteRune(escapeChar) + } + _, _ = buf.WriteRune(ch) + } +} + +// Valid returns true if this encoder ID was allocated by +// `NewEncoderID`. Invalid encoder IDs will not be cached. +func (id EncoderID) Valid() bool { + return id.value != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go new file mode 100644 index 00000000..638c213d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -0,0 +1,60 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Filter supports removing certain attributes from attribute sets. When +// the filter returns true, the attribute will be kept in the filtered +// attribute set. When the filter returns false, the attribute is excluded +// from the filtered attribute set, and the attribute instead appears in +// the removed list of excluded attributes. +type Filter func(KeyValue) bool + +// NewAllowKeysFilter returns a Filter that only allows attributes with one of +// the provided keys. +// +// If keys is empty a deny-all filter is returned. +func NewAllowKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return false } + } + + allowed := make(map[Key]struct{}) + for _, k := range keys { + allowed[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := allowed[kv.Key] + return ok + } +} + +// NewDenyKeysFilter returns a Filter that only allows attributes +// that do not have one of the provided keys. +// +// If keys is empty an allow-all filter is returned. +func NewDenyKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return true } + } + + forbid := make(map[Key]struct{}) + for _, k := range keys { + forbid[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := forbid[kv.Key] + return !ok + } +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 00000000..841b271f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,161 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. +func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. 
+func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. +func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. +func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. +func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. +func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 00000000..0656a04e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int(name, value). +func (k Key) Int(v int) KeyValue { + return KeyValue{ + Key: k, + Value: IntValue(v), + } +} + +// IntSlice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- IntSlice(name, value). +func (k Key) IntSlice(v []int) KeyValue { + return KeyValue{ + Key: k, + Value: IntSliceValue(v), + } +} + +// Int64 creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64(name, value). +func (k Key) Int64(v int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64Value(v), + } +} + +// Int64Slice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64Slice(name, value). +func (k Key) Int64Slice(v []int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64SliceValue(v), + } +} + +// Float64 creates a KeyValue instance with a FLOAT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64(v float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64Value(v), + } +} + +// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64Slice(v []float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64SliceValue(v), + } +} + +// String creates a KeyValue instance with a STRING Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- String(name, value). +func (k Key) String(v string) KeyValue { + return KeyValue{ + Key: k, + Value: StringValue(v), + } +} + +// StringSlice creates a KeyValue instance with a STRINGSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- StringSlice(name, value). +func (k Key) StringSlice(v []string) KeyValue { + return KeyValue{ + Key: k, + Value: StringSliceValue(v), + } +} + +// Defined returns true for non-empty keys. 
+func (k Key) Defined() bool { + return len(k) != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go new file mode 100644 index 00000000..1ddf3ce0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" +) + +// KeyValue holds a key and value pair. +type KeyValue struct { + Key Key + Value Value +} + +// Valid returns if kv is a valid OpenTelemetry attribute. +func (kv KeyValue) Valid() bool { + return kv.Key.Defined() && kv.Value.Type() != INVALID +} + +// Bool creates a KeyValue with a BOOL Value type. +func Bool(k string, v bool) KeyValue { + return Key(k).Bool(v) +} + +// BoolSlice creates a KeyValue with a BOOLSLICE Value type. +func BoolSlice(k string, v []bool) KeyValue { + return Key(k).BoolSlice(v) +} + +// Int creates a KeyValue with an INT64 Value type. +func Int(k string, v int) KeyValue { + return Key(k).Int(v) +} + +// IntSlice creates a KeyValue with an INT64SLICE Value type. +func IntSlice(k string, v []int) KeyValue { + return Key(k).IntSlice(v) +} + +// Int64 creates a KeyValue with an INT64 Value type. +func Int64(k string, v int64) KeyValue { + return Key(k).Int64(v) +} + +// Int64Slice creates a KeyValue with an INT64SLICE Value type. +func Int64Slice(k string, v []int64) KeyValue { + return Key(k).Int64Slice(v) +} + +// Float64 creates a KeyValue with a FLOAT64 Value type. +func Float64(k string, v float64) KeyValue { + return Key(k).Float64(v) +} + +// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. +func Float64Slice(k string, v []float64) KeyValue { + return Key(k).Float64Slice(v) +} + +// String creates a KeyValue with a STRING Value type. +func String(k, v string) KeyValue { + return Key(k).String(v) +} + +// StringSlice creates a KeyValue with a STRINGSLICE Value type. +func StringSlice(k string, v []string) KeyValue { + return Key(k).StringSlice(v) +} + +// Stringer creates a new key-value pair with a passed name and a string +// value generated by the passed Stringer interface. +func Stringer(k string, v fmt.Stringer) KeyValue { + return Key(k).String(v.String()) +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go new file mode 100644 index 00000000..9f9303d4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -0,0 +1,429 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "reflect" + "sort" + "sync" +) + +type ( + // Set is the representation for a distinct attribute set. It manages an + // immutable set of attributes, with an internal cache for storing + // attribute encodings. + // + // This type supports the Equivalent method of comparison using values of + // type Distinct. + Set struct { + equivalent Distinct + } + + // Distinct wraps a variable-size array of KeyValue, constructed with keys + // in sorted order. This can be used as a map key or for equality checking + // between Sets. + Distinct struct { + iface interface{} + } + + // Sortable implements sort.Interface, used for sorting KeyValue. This is + // an exported type to support a memory optimization. A pointer to one of + // these is needed for the call to sort.Stable(), which the caller may + // provide in order to avoid an allocation. See NewSetWithSortable(). + Sortable []KeyValue +) + +var ( + // keyValueType is used in computeDistinctReflect. + keyValueType = reflect.TypeOf(KeyValue{}) + + // emptySet is returned for empty attribute sets. + emptySet = &Set{ + equivalent: Distinct{ + iface: [0]KeyValue{}, + }, + } + + // sortables is a pool of Sortables used to create Sets with a user does + // not provide one. + sortables = sync.Pool{ + New: func() interface{} { return new(Sortable) }, + } +) + +// EmptySet returns a reference to a Set with no elements. +// +// This is a convenience provided for optimized calling utility. +func EmptySet() *Set { + return emptySet +} + +// reflectValue abbreviates reflect.ValueOf(d). +func (d Distinct) reflectValue() reflect.Value { + return reflect.ValueOf(d.iface) +} + +// Valid returns true if this value refers to a valid Set. +func (d Distinct) Valid() bool { + return d.iface != nil +} + +// Len returns the number of attributes in this set. +func (l *Set) Len() int { + if l == nil || !l.equivalent.Valid() { + return 0 + } + return l.equivalent.reflectValue().Len() +} + +// Get returns the KeyValue at ordered position idx in this set. +func (l *Set) Get(idx int) (KeyValue, bool) { + if l == nil || !l.equivalent.Valid() { + return KeyValue{}, false + } + value := l.equivalent.reflectValue() + + if idx >= 0 && idx < value.Len() { + // Note: The Go compiler successfully avoids an allocation for + // the interface{} conversion here: + return value.Index(idx).Interface().(KeyValue), true + } + + return KeyValue{}, false +} + +// Value returns the value of a specified key in this set. +func (l *Set) Value(k Key) (Value, bool) { + if l == nil || !l.equivalent.Valid() { + return Value{}, false + } + rValue := l.equivalent.reflectValue() + vlen := rValue.Len() + + idx := sort.Search(vlen, func(idx int) bool { + return rValue.Index(idx).Interface().(KeyValue).Key >= k + }) + if idx >= vlen { + return Value{}, false + } + keyValue := rValue.Index(idx).Interface().(KeyValue) + if k == keyValue.Key { + return keyValue.Value, true + } + return Value{}, false +} + +// HasValue tests whether a key is defined in this set. 
+func (l *Set) HasValue(k Key) bool { + if l == nil { + return false + } + _, ok := l.Value(k) + return ok +} + +// Iter returns an iterator for visiting the attributes in this set. +func (l *Set) Iter() Iterator { + return Iterator{ + storage: l, + idx: -1, + } +} + +// ToSlice returns the set of attributes belonging to this set, sorted, where +// keys appear no more than once. +func (l *Set) ToSlice() []KeyValue { + iter := l.Iter() + return iter.ToSlice() +} + +// Equivalent returns a value that may be used as a map key. The Distinct type +// guarantees that the result will equal the equivalent. Distinct value of any +// attribute set with the same elements as this, where sets are made unique by +// choosing the last value in the input for any given key. +func (l *Set) Equivalent() Distinct { + if l == nil || !l.equivalent.Valid() { + return emptySet.equivalent + } + return l.equivalent +} + +// Equals returns true if the argument set is equivalent to this set. +func (l *Set) Equals(o *Set) bool { + return l.Equivalent() == o.Equivalent() +} + +// Encoded returns the encoded form of this set, according to encoder. +func (l *Set) Encoded(encoder Encoder) string { + if l == nil || encoder == nil { + return "" + } + + return encoder.Encode(l.Iter()) +} + +func empty() Set { + return Set{ + equivalent: emptySet.equivalent, + } +} + +// NewSet returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// Except for empty sets, this method adds an additional allocation compared +// with calls that include a Sortable. +func NewSet(kvs ...KeyValue) Set { + // Check for empty set. + if len(kvs) == 0 { + return empty() + } + srt := sortables.Get().(*Sortable) + s, _ := NewSetWithSortableFiltered(kvs, srt, nil) + sortables.Put(srt) + return s +} + +// NewSetWithSortable returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Sortable option as a memory optimization. +func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { + // Check for empty set. + if len(kvs) == 0 { + return empty() + } + s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) + return s +} + +// NewSetWithFiltered returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Filter to include/exclude attribute keys from the +// return value. Excluded keys are returned as a slice of attribute values. +func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + // Check for empty set. + if len(kvs) == 0 { + return empty(), nil + } + srt := sortables.Get().(*Sortable) + s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) + sortables.Put(srt) + return s, filtered +} + +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. 
+// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { + // Check for empty set. + if len(kvs) == 0 { + return empty(), nil + } + + *tmp = kvs + + // Stable sort so the following de-duplication can implement + // last-value-wins semantics. + sort.Stable(tmp) + + *tmp = nil + + position := len(kvs) - 1 + offset := position - 1 + + // The requirements stated above require that the stable + // result be placed in the end of the input slice, while + // overwritten values are swapped to the beginning. + // + // De-duplicate with last-value-wins semantics. Preserve + // duplicate values at the beginning of the input slice. + for ; offset >= 0; offset-- { + if kvs[offset].Key == kvs[position].Key { + continue + } + position-- + kvs[offset], kvs[position] = kvs[position], kvs[offset] + } + if filter != nil { + return filterSet(kvs[position:], filter) + } + return Set{ + equivalent: computeDistinct(kvs[position:]), + }, nil +} + +// filterSet reorders kvs so that included keys are contiguous at the end of +// the slice, while excluded keys precede the included keys. +func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + var excluded []KeyValue + + // Move attributes that do not match the filter so they're adjacent before + // calling computeDistinct(). + distinctPosition := len(kvs) + + // Swap indistinct keys forward and distinct keys toward the + // end of the slice. + offset := len(kvs) - 1 + for ; offset >= 0; offset-- { + if filter(kvs[offset]) { + distinctPosition-- + kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset] + continue + } + } + excluded = kvs[:distinctPosition] + + return Set{ + equivalent: computeDistinct(kvs[distinctPosition:]), + }, excluded +} + +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. +func (l *Set) Filter(re Filter) (Set, []KeyValue) { + if re == nil { + return Set{ + equivalent: l.equivalent, + }, nil + } + + // Note: This could be refactored to avoid the temporary slice + // allocation, if it proves to be expensive. + return filterSet(l.ToSlice(), re) +} + +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. +func computeDistinct(kvs []KeyValue) Distinct { + iface := computeDistinctFixed(kvs) + if iface == nil { + iface = computeDistinctReflect(kvs) + } + return Distinct{ + iface: iface, + } +} + +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. 
+func computeDistinctFixed(kvs []KeyValue) interface{} {
+	switch len(kvs) {
+	case 1:
+		ptr := new([1]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 2:
+		ptr := new([2]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 3:
+		ptr := new([3]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 4:
+		ptr := new([4]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 5:
+		ptr := new([5]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 6:
+		ptr := new([6]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 7:
+		ptr := new([7]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 8:
+		ptr := new([8]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 9:
+		ptr := new([9]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 10:
+		ptr := new([10]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	default:
+		return nil
+	}
+}
+
+// computeDistinctReflect computes a Distinct using reflection; it works for
+// any size input.
+func computeDistinctReflect(kvs []KeyValue) interface{} {
+	at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
+	for i, keyValue := range kvs {
+		*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
+	}
+	return at.Interface()
+}
+
+// MarshalJSON returns the JSON encoding of the Set.
+func (l *Set) MarshalJSON() ([]byte, error) {
+	return json.Marshal(l.equivalent.iface)
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Set.
+func (l Set) MarshalLog() interface{} {
+	kvs := make(map[string]string)
+	for _, kv := range l.ToSlice() {
+		kvs[string(kv.Key)] = kv.Value.Emit()
+	}
+	return kvs
+}
+
+// Len implements sort.Interface.
+func (l *Sortable) Len() int {
+	return len(*l)
+}
+
+// Swap implements sort.Interface.
+func (l *Sortable) Swap(i, j int) {
+	(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
+}
+
+// Less implements sort.Interface.
+func (l *Sortable) Less(i, j int) bool {
+	return (*l)[i].Key < (*l)[j].Key
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
new file mode 100644
index 00000000..e584b247
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
@@ -0,0 +1,31 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT.
+
+package attribute
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[INVALID-0]
+	_ = x[BOOL-1]
+	_ = x[INT64-2]
+	_ = x[FLOAT64-3]
+	_ = x[STRING-4]
+	_ = x[BOOLSLICE-5]
+	_ = x[INT64SLICE-6]
+	_ = x[FLOAT64SLICE-7]
+	_ = x[STRINGSLICE-8]
+}
+
+const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
+
+var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
+
+func (i Type) String() string {
+	if i < 0 || i >= Type(len(_Type_index)-1) {
+		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
new file mode 100644
index 00000000..cb21dd5c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -0,0 +1,270 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"go.opentelemetry.io/otel/internal"
+	"go.opentelemetry.io/otel/internal/attribute"
+)
+
+//go:generate stringer -type=Type
+
+// Type describes the type of the data Value holds.
+type Type int // nolint: revive // redefines builtin Type.
+
+// Value represents the value part in key-value pairs.
+type Value struct {
+	vtype    Type
+	numeric  uint64
+	stringly string
+	slice    interface{}
+}
+
+const (
+	// INVALID is used for a Value with no value set.
+	INVALID Type = iota
+	// BOOL is a boolean Type Value.
+	BOOL
+	// INT64 is a 64-bit signed integral Type Value.
+	INT64
+	// FLOAT64 is a 64-bit floating point Type Value.
+	FLOAT64
+	// STRING is a string Type Value.
+	STRING
+	// BOOLSLICE is a slice of booleans Type Value.
+	BOOLSLICE
+	// INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
+	INT64SLICE
+	// FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
+	FLOAT64SLICE
+	// STRINGSLICE is a slice of strings Type Value.
+	STRINGSLICE
+)
+
+// BoolValue creates a BOOL Value.
+func BoolValue(v bool) Value {
+	return Value{
+		vtype:   BOOL,
+		numeric: internal.BoolToRaw(v),
+	}
+}
+
+// BoolSliceValue creates a BOOLSLICE Value.
+func BoolSliceValue(v []bool) Value {
+	return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
+}
+
+// IntValue creates an INT64 Value.
+func IntValue(v int) Value {
+	return Int64Value(int64(v))
+}
+
+// IntSliceValue creates an INT64SLICE Value.
+func IntSliceValue(v []int) Value {
+	var int64Val int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
+	for i, val := range v {
+		cp.Elem().Index(i).SetInt(int64(val))
+	}
+	return Value{
+		vtype: INT64SLICE,
+		slice: cp.Elem().Interface(),
+	}
+}
+
+// Int64Value creates an INT64 Value.
+func Int64Value(v int64) Value {
+	return Value{
+		vtype:   INT64,
+		numeric: internal.Int64ToRaw(v),
+	}
+}
+
+// Int64SliceValue creates an INT64SLICE Value.
+func Int64SliceValue(v []int64) Value {
+	return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
+}
+
+// Float64Value creates a FLOAT64 Value.
+func Float64Value(v float64) Value {
+	return Value{
+		vtype:   FLOAT64,
+		numeric: internal.Float64ToRaw(v),
+	}
+}
+
+// Float64SliceValue creates a FLOAT64SLICE Value.
+func Float64SliceValue(v []float64) Value {
+	return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
+}
+
+// StringValue creates a STRING Value.
+func StringValue(v string) Value {
+	return Value{
+		vtype:    STRING,
+		stringly: v,
+	}
+}
+
+// StringSliceValue creates a STRINGSLICE Value.
+func StringSliceValue(v []string) Value {
+	return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
+}
+
+// Type returns the type of the Value.
+func (v Value) Type() Type {
+	return v.vtype
+}
+
+// AsBool returns the bool value. Make sure that the Value's type is
+// BOOL.
+func (v Value) AsBool() bool {
+	return internal.RawToBool(v.numeric)
+}
+
+// AsBoolSlice returns the []bool value. 
Make sure that the Value's type is +// BOOLSLICE. +func (v Value) AsBoolSlice() []bool { + if v.vtype != BOOLSLICE { + return nil + } + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) +} + +// AsInt64 returns the int64 value. Make sure that the Value's type is +// INT64. +func (v Value) AsInt64() int64 { + return internal.RawToInt64(v.numeric) +} + +// AsInt64Slice returns the []int64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsInt64Slice() []int64 { + if v.vtype != INT64SLICE { + return nil + } + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) +} + +// AsFloat64 returns the float64 value. Make sure that the Value's +// type is FLOAT64. +func (v Value) AsFloat64() float64 { + return internal.RawToFloat64(v.numeric) +} + +// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is +// FLOAT64SLICE. +func (v Value) AsFloat64Slice() []float64 { + if v.vtype != FLOAT64SLICE { + return nil + } + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) +} + +// AsString returns the string value. Make sure that the Value's type +// is STRING. +func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. Make sure that the Value's type is +// STRINGSLICE. +func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. +func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: + return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: + return fmt.Sprint(v.asInt64Slice()) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: + return fmt.Sprint(v.asFloat64Slice()) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: + return fmt.Sprint(v.asStringSlice()) + case STRING: + return v.stringly + default: + return "unknown" + } +} + +// MarshalJSON returns the JSON encoding of the Value. +func (v Value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Type().String() + jsonVal.Value = v.AsInterface() + return json.Marshal(jsonVal) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go new file mode 100644 index 00000000..9e6b3b7b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -0,0 +1,552 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "strings" + + "go.opentelemetry.io/otel/internal/baggage" +) + +const ( + maxMembers = 180 + maxBytesPerMembers = 4096 + maxBytesPerBaggageString = 8192 + + listDelimiter = "," + keyValueDelimiter = "=" + propertyDelimiter = ";" + + keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)` + valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)` + keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*` +) + +var ( + keyRe = regexp.MustCompile(`^` + keyDef + `$`) + valueRe = regexp.MustCompile(`^` + valueDef + `$`) + propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`) +) + +var ( + errInvalidKey = errors.New("invalid key") + errInvalidValue = errors.New("invalid value") + errInvalidProperty = errors.New("invalid baggage list-member property") + errInvalidMember = errors.New("invalid baggage list-member") + errMemberNumber = errors.New("too many list-members in baggage-string") + errMemberBytes = errors.New("list-member too large") + errBaggageBytes = errors.New("baggage-string too large") +) + +// Property is an additional metadata entry for a baggage list-member. +type Property struct { + key, value string + + // hasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + hasValue bool +} + +// NewKeyProperty returns a new Property for key. +// +// If key is invalid, an error will be returned. +func NewKeyProperty(key string) (Property, error) { + if !keyRe.MatchString(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + p := Property{key: key} + return p, nil +} + +// NewKeyValueProperty returns a new Property for key with value. +// +// If key or value are invalid, an error will be returned. +func NewKeyValueProperty(key, value string) (Property, error) { + if !keyRe.MatchString(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + + p := Property{ + key: key, + value: value, + hasValue: true, + } + return p, nil +} + +func newInvalidProperty() Property { + return Property{} +} + +// parseProperty attempts to decode a Property from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +func parseProperty(property string) (Property, error) { + if property == "" { + return newInvalidProperty(), nil + } + + match := propertyRe.FindStringSubmatch(property) + if len(match) != 4 { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) + } + + var p Property + if match[1] != "" { + p.key = match[1] + } else { + p.key = match[2] + p.value = match[3] + p.hasValue = true + } + + return p, nil +} + +// validate ensures p conforms to the W3C Baggage specification, returning an +// error otherwise. 
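+// In particular, a non-empty value on a Property whose hasValue flag is
+// unset is reported as inconsistent; the exported constructors never
+// produce that state.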
+func (p Property) validate() error {
+	errFunc := func(err error) error {
+		return fmt.Errorf("invalid property: %w", err)
+	}
+
+	if !keyRe.MatchString(p.key) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+	}
+	if p.hasValue && !valueRe.MatchString(p.value) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+	}
+	if !p.hasValue && p.value != "" {
+		return errFunc(errors.New("inconsistent value"))
+	}
+	return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+	return p.key
+}
+
+// Value returns the Property value. Additionally, a boolean value is
+// returned indicating whether the Property has a value at all; this
+// distinguishes a Property whose value is the empty string from one whose
+// value was never set.
+func (p Property) Value() (string, bool) {
+	return p.value, p.hasValue
+}
+
+// String encodes Property into a string compliant with the W3C Baggage
+// specification.
+func (p Property) String() string {
+	if p.hasValue {
+		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
+	}
+	return p.key
+}
+
+type properties []Property
+
+func fromInternalProperties(iProps []baggage.Property) properties {
+	if len(iProps) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(iProps))
+	for i, p := range iProps {
+		props[i] = Property{
+			key:      p.Key,
+			value:    p.Value,
+			hasValue: p.HasValue,
+		}
+	}
+	return props
+}
+
+func (p properties) asInternal() []baggage.Property {
+	if len(p) == 0 {
+		return nil
+	}
+
+	iProps := make([]baggage.Property, len(p))
+	for i, prop := range p {
+		iProps[i] = baggage.Property{
+			Key:      prop.key,
+			Value:    prop.value,
+			HasValue: prop.hasValue,
+		}
+	}
+	return iProps
+}
+
+func (p properties) Copy() properties {
+	if len(p) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(p))
+	copy(props, p)
+	return props
+}
+
+// validate ensures each Property in p conforms to the W3C Baggage
+// specification, returning an error otherwise.
+func (p properties) validate() error {
+	for _, prop := range p {
+		if err := prop.validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// String encodes properties into a string compliant with the W3C Baggage
+// specification.
+func (p properties) String() string {
+	props := make([]string, len(p))
+	for i, prop := range p {
+		props[i] = prop.String()
+	}
+	return strings.Join(props, propertyDelimiter)
+}
+
+// Member is a list-member of a baggage-string as defined by the W3C Baggage
+// specification.
+type Member struct {
+	key, value string
+	properties properties
+
+	// hasData indicates whether the created member contains data or not.
+	// Members that do not contain data are invalid with no other check
+	// required.
+	hasData bool
+}
+
+// NewMember returns a new Member from the passed arguments. The key will be
+// used directly while the value will be URL decoded after validation. An error
+// is returned if the created Member would be invalid according to the W3C
+// Baggage specification.
+func NewMember(key, value string, props ...Property) (Member, error) {
+	m := Member{
+		key:        key,
+		value:      value,
+		properties: properties(props).Copy(),
+		hasData:    true,
+	}
+	if err := m.validate(); err != nil {
+		return newInvalidMember(), err
+	}
+	decodedValue, err := url.QueryUnescape(value)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	m.value = decodedValue
+	return m, nil
+}
+
+func newInvalidMember() Member {
+	return Member{}
+}
+
+// parseMember attempts to decode a Member from the passed string. It returns
+// an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseMember(member string) (Member, error) {
+	if n := len(member); n > maxBytesPerMembers {
+		return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
+	}
+
+	var (
+		key, value string
+		props      properties
+	)
+
+	keyValue, properties, found := strings.Cut(member, propertyDelimiter)
+	if found {
+		// Parse the member properties.
+		for _, pStr := range strings.Split(properties, propertyDelimiter) {
+			p, err := parseProperty(pStr)
+			if err != nil {
+				return newInvalidMember(), err
+			}
+			props = append(props, p)
+		}
+	}
+	// Parse the member key/value pair.
+
+	// Take into account a value can contain equal signs (=).
+	k, v, found := strings.Cut(keyValue, keyValueDelimiter)
+	if !found {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
+	}
+	// "Leading and trailing whitespaces are allowed but MUST be trimmed
+	// when converting the header into a data structure."
+	key = strings.TrimSpace(k)
+	var err error
+	value, err = url.QueryUnescape(strings.TrimSpace(v))
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
+	}
+	if !keyRe.MatchString(key) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+	if !valueRe.MatchString(value) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+
+	return Member{key: key, value: value, properties: props, hasData: true}, nil
+}
+
+// validate ensures m conforms to the W3C Baggage specification, returning an
+// error otherwise. A key is just an ASCII string; a value must be URL encoded
+// UTF-8.
+func (m Member) validate() error {
+	if !m.hasData {
+		return fmt.Errorf("%w: %q", errInvalidMember, m)
+	}
+
+	if !keyRe.MatchString(m.key) {
+		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
+	}
+	if !valueRe.MatchString(m.value) {
+		return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+	}
+	return m.properties.validate()
+}
+
+// Key returns the Member key.
+func (m Member) Key() string { return m.key }
+
+// Value returns the Member value.
+func (m Member) Value() string { return m.value }
+
+// Properties returns a copy of the Member properties.
+func (m Member) Properties() []Property { return m.properties.Copy() }
+
+// String encodes Member into a string compliant with the W3C Baggage
+// specification.
+func (m Member) String() string {
+	// A key is just an ASCII string, but a value is URL encoded UTF-8.
+	s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
+	if len(m.properties) > 0 {
+		s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
+	}
+	return s
+}
+
+// Baggage is a list of baggage members representing the baggage-string as
+// defined by the W3C Baggage specification.
+type Baggage struct { //nolint:golint
+	list baggage.List
+}
+
+// New returns a new valid Baggage. It returns an error if it results in a
+// Baggage exceeding limits set in the W3C Baggage specification.
+//
+// It expects all the provided members to have already been validated.
+func New(members ...Member) (Baggage, error) {
+	if len(members) == 0 {
+		return Baggage{}, nil
+	}
+
+	b := make(baggage.List)
+	for _, m := range members {
+		if !m.hasData {
+			return Baggage{}, errInvalidMember
+		}
+
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// Check member numbers after deduplication.
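+	// (Only unique keys count toward the limit: duplicate list-members
+	// were collapsed above by the last-one-wins assignment.)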
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	bag := Baggage{b}
+	if n := len(bag.String()); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	return bag, nil
+}
+
+// Parse attempts to decode a baggage-string from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+//
+// If there are duplicate list-members contained in baggage, the last one
+// defined (reading left-to-right) will be the only one kept. This diverges
+// from the W3C Baggage specification which allows duplicate list-members, but
+// conforms to the OpenTelemetry Baggage specification.
+func Parse(bStr string) (Baggage, error) {
+	if bStr == "" {
+		return Baggage{}, nil
+	}
+
+	if n := len(bStr); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	b := make(baggage.List)
+	for _, memberStr := range strings.Split(bStr, listDelimiter) {
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return Baggage{}, err
+		}
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// OpenTelemetry does not allow for duplicate list-members, but the W3C
+	// specification does. Now that we have deduplicated, ensure the baggage
+	// does not exceed list-member limits.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	return Baggage{b}, nil
+}
+
+// Member returns the baggage list-member identified by key.
+//
+// If there is no list-member matching the passed key the returned Member will
+// be a zero-value Member.
+// The returned member is not validated, as we assume the validation happened
+// when it was added to the Baggage.
+func (b Baggage) Member(key string) Member {
+	v, ok := b.list[key]
+	if !ok {
+		// We do not need to worry about distinguishing between the situation
+		// where a zero-valued Member is included in the Baggage because a
+		// zero-valued Member is invalid according to the W3C Baggage
+		// specification (it has an empty key).
+		return newInvalidMember()
+	}
+
+	return Member{
+		key:        key,
+		value:      v.Value,
+		properties: fromInternalProperties(v.Properties),
+		hasData:    true,
+	}
+}
+
+// Members returns all the baggage list-members.
+// The order of the returned list-members is not significant.
+//
+// The returned members are not validated, as we assume the validation happened
+// when they were added to the Baggage.
+func (b Baggage) Members() []Member {
+	if len(b.list) == 0 {
+		return nil
+	}
+
+	members := make([]Member, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+			hasData:    true,
+		})
+	}
+	return members
+}
+
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key the existing Member is
+// replaced.
+//
+// If member is invalid according to the W3C Baggage specification, an error
+// is returned with the original Baggage.
+func (b Baggage) SetMember(member Member) (Baggage, error) {
+	if !member.hasData {
+		return b, errInvalidMember
+	}
+
+	n := len(b.list)
+	if _, ok := b.list[member.key]; !ok {
+		n++
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		// Do not copy if we are just going to overwrite.
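+		// (Skipping the stale entry here, rather than copying it and then
+		// overwriting it below, avoids one redundant map write.)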
+ if k == member.key { + continue + } + list[k] = v + } + + list[member.key] = baggage.Item{ + Value: member.value, + Properties: member.properties.asInternal(), + } + + return Baggage{list: list}, nil +} + +// DeleteMember returns a copy of the Baggage with the list-member identified +// by key removed. +func (b Baggage) DeleteMember(key string) Baggage { + n := len(b.list) + if _, ok := b.list[key]; ok { + n-- + } + list := make(baggage.List, n) + + for k, v := range b.list { + if k == key { + continue + } + list[k] = v + } + + return Baggage{list: list} +} + +// Len returns the number of list-members in the Baggage. +func (b Baggage) Len() int { + return len(b.list) +} + +// String encodes Baggage into a string compliant with the W3C Baggage +// specification. The returned string will be invalid if the Baggage contains +// any invalid list-members. +func (b Baggage) String() string { + members := make([]string, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + }.String()) + } + return strings.Join(members, listDelimiter) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go new file mode 100644 index 00000000..24b34b75 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, b.list) +} + +// ContextWithoutBaggage returns a copy of parent with no baggage. +func ContextWithoutBaggage(parent context.Context) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, nil) +} + +// FromContext returns the baggage contained in ctx. +func FromContext(ctx context.Context) Baggage { + // Delegate so any hooks for the OpenTracing bridge are handled. + return Baggage{list: baggage.ListFromContext(ctx)} +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go new file mode 100644 index 00000000..4545100d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package baggage provides functionality for storing and retrieving
+baggage items in Go context. For propagating the baggage, see the
+go.opentelemetry.io/otel/propagation package.
+*/
+package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
new file mode 100644
index 00000000..587ebae4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package codes // import "go.opentelemetry.io/otel/codes"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+const (
+	// Unset is the default status code.
+	Unset Code = 0
+
+	// Error indicates the operation contains an error.
+	//
+	// NOTE: The error code in OTLP is 2.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Error Code = 1
+
+	// Ok indicates the operation has been validated by an application
+	// developer or operator to have completed successfully, or to contain
+	// no error.
+	//
+	// NOTE: The Ok code in OTLP is 1.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Ok Code = 2
+
+	maxCode = 3
+)
+
+// Code is a 32-bit representation of a status state.
+type Code uint32
+
+var codeToStr = map[Code]string{
+	Unset: "Unset",
+	Error: "Error",
+	Ok:    "Ok",
+}
+
+var strToCode = map[string]Code{
+	`"Unset"`: Unset,
+	`"Error"`: Error,
+	`"Ok"`:    Ok,
+}
+
+// String returns the Code as a string.
+func (c Code) String() string {
+	return codeToStr[c]
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+//
+// This is based on the functionality in the gRPC codes package:
+// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
+func (c *Code) UnmarshalJSON(b []byte) error {
+	// From json.Unmarshaler: By convention, to approximate the behavior of
+	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+	// a no-op.
+ if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + var x interface{} + if err := json.Unmarshal(b, &x); err != nil { + return err + } + switch x.(type) { + case string: + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + case float64: + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + default: + return fmt.Errorf("invalid code: %q", string(b)) + } +} + +// MarshalJSON returns c as the JSON encoding of c. +func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 00000000..4e328fbb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package codes defines the canonical error codes used by OpenTelemetry. + +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 00000000..daa36c89 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. 
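+
+For example (the instrumentation scope name below is illustrative), the
+global providers yield named tracers and meters:
+
+	tracer := otel.Tracer("example.com/mypkg")
+	meter := otel.Meter("example.com/mypkg")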
+ +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. +*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 00000000..72fad854 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. +func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh new file mode 100644 index 00000000..9a58fb1d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +top_dir='.' +if [[ $# -gt 0 ]]; then + top_dir="${1}" +fi + +p=$(pwd) +mod_dirs=() + +# Note `mapfile` does not exist in older bash versions: +# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash + +while IFS= read -r line; do + mod_dirs+=("$line") +done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) + +for mod_dir in "${mod_dirs[@]}"; do + cd "${mod_dir}" + + while IFS= read -r line; do + echo ".${line#${p}}" + done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... 
| grep '^main|' | cut -f 2- -d '|')
+	cd "${p}"
+done
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
new file mode 100644
index 00000000..4115fe3b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+var (
+	// Compile-time check global.ErrDelegator implements ErrorHandler.
+	_ ErrorHandler = (*global.ErrDelegator)(nil)
+	// Compile-time check global.ErrLogger implements ErrorHandler.
+	_ ErrorHandler = (*global.ErrLogger)(nil)
+)
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandler returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called all ErrorHandler previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
+
+// Handle is a convenience function for ErrorHandler().Handle(err).
+func Handle(err error) { global.Handle(err) }
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
new file mode 100644
index 00000000..622c3ee3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -0,0 +1,111 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package attribute provides several helper functions for some commonly used
+attribute-processing logic.
+*/
+package attribute // import "go.opentelemetry.io/otel/internal/attribute"
+
+import (
+	"reflect"
+)
+
+// BoolSliceValue converts a bool slice into an array with the same elements
+// as the slice.
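+// For example (sketch), BoolSliceValue([]bool{true, false}) returns an
+// interface{} holding a comparable [2]bool array value.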
+func BoolSliceValue(v []bool) interface{} {
+	var zero bool
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
+	return cp.Elem().Interface()
+}
+
+// Int64SliceValue converts an int64 slice into an array with the same
+// elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+	var zero int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
+	return cp.Elem().Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same
+// elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+	var zero float64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
+	return cp.Elem().Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same
+// elements as the slice.
+func StringSliceValue(v []string) interface{} {
+	var zero string
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
+	return cp.Elem().Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as
+// the array.
+func AsBoolSlice(v interface{}) []bool {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero bool
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements
+// as the array.
+func AsInt64Slice(v interface{}) []int64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero int64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements
+// as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero float64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+}
+
+// AsStringSlice converts a string array into a slice with the same elements
+// as the array.
+func AsStringSlice(v interface{}) []string {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero string
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]string)
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
new file mode 100644
index 00000000..b96e5408
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item + +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property +} + +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string + + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + HasValue bool +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 00000000..4469700d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. +type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. 
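+// The hook receives the retrieving context and the stored List; the List it
+// returns is what callers of FromContext observe.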
+// +// Passing nil GetHookFunc creates a context with no get hook to call. +func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. +func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 00000000..f532f07e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package internal // import "go.opentelemetry.io/otel/internal"
+
+//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
+//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
+//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
+
+//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
+//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
+//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
+//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
+//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
new file mode 100644
index 00000000..5e9b8304
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
@@ -0,0 +1,102 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+)
+
+var (
+	// GlobalErrorHandler provides an ErrorHandler that can be used
+	// throughout an OpenTelemetry instrumented project. When a
+	// user-specified ErrorHandler is registered (`SetErrorHandler`), all
+	// calls to `Handle` will be delegated to the registered ErrorHandler.
+	GlobalErrorHandler = defaultErrorHandler()
+
+	// Compile-time check that delegator implements ErrorHandler.
+	_ ErrorHandler = (*ErrDelegator)(nil)
+	// Compile-time check that errLogger implements ErrorHandler.
+	_ ErrorHandler = (*ErrLogger)(nil)
+)
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+	// Handle handles any error deemed irremediable by an OpenTelemetry
+	// component.
+ Handle(error) +} + +type ErrDelegator struct { + delegate atomic.Pointer[ErrorHandler] +} + +func (d *ErrDelegator) Handle(err error) { + d.getDelegate().Handle(err) +} + +func (d *ErrDelegator) getDelegate() ErrorHandler { + return *d.delegate.Load() +} + +// setDelegate sets the ErrorHandler delegate. +func (d *ErrDelegator) setDelegate(eh ErrorHandler) { + d.delegate.Store(&eh) +} + +func defaultErrorHandler() *ErrDelegator { + d := &ErrDelegator{} + d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) + return d +} + +// ErrLogger logs errors if no delegate is set, otherwise they are delegated. +type ErrLogger struct { + l *log.Logger +} + +// Handle logs err if no delegate is set, otherwise it is delegated. +func (h *ErrLogger) Handle(err error) { + h.l.Print(err) +} + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return GlobalErrorHandler +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + GlobalErrorHandler.setDelegate(h) +} + +// Handle is a convenience function for ErrorHandler().Handle(err). +func Handle(err error) { + GetErrorHandler().Handle(err) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go new file mode 100644 index 00000000..a33eded8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -0,0 +1,359 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() metric.Observable +} + +type afCounter struct { + embedded.Float64ObservableCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableCounterOption + + delegate atomic.Value //metric.Float64ObservableCounter +} + +var _ unwrapper = (*afCounter)(nil) +var _ metric.Float64ObservableCounter = (*afCounter)(nil) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) 
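+	// If the delegate Meter cannot create the instrument, the error is
+	// reported and the delegate is left unset, so the instrument keeps its
+	// embedded no-op behavior.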
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + embedded.Float64ObservableUpDownCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableUpDownCounterOption + + delegate atomic.Value //metric.Float64ObservableUpDownCounter +} + +var _ unwrapper = (*afUpDownCounter)(nil) +var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + embedded.Float64ObservableGauge + metric.Float64Observable + + name string + opts []metric.Float64ObservableGaugeOption + + delegate atomic.Value //metric.Float64ObservableGauge +} + +var _ unwrapper = (*afGauge)(nil) +var _ metric.Float64ObservableGauge = (*afGauge)(nil) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + embedded.Int64ObservableCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableCounterOption + + delegate atomic.Value //metric.Int64ObservableCounter +} + +var _ unwrapper = (*aiCounter)(nil) +var _ metric.Int64ObservableCounter = (*aiCounter)(nil) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + embedded.Int64ObservableUpDownCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableUpDownCounterOption + + delegate atomic.Value //metric.Int64ObservableUpDownCounter +} + +var _ unwrapper = (*aiUpDownCounter)(nil) +var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + embedded.Int64ObservableGauge + metric.Int64Observable + + name string + opts []metric.Int64ObservableGaugeOption + + delegate atomic.Value //metric.Int64ObservableGauge +} + +var _ unwrapper = (*aiGauge)(nil) +var _ metric.Int64ObservableGauge = (*aiGauge)(nil) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) 
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + embedded.Float64Counter + + name string + opts []metric.Float64CounterOption + + delegate atomic.Value //metric.Float64Counter +} + +var _ metric.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Counter).Add(ctx, incr, opts...) + } +} + +type sfUpDownCounter struct { + embedded.Float64UpDownCounter + + name string + opts []metric.Float64UpDownCounterOption + + delegate atomic.Value //metric.Float64UpDownCounter +} + +var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) + } +} + +type sfHistogram struct { + embedded.Float64Histogram + + name string + opts []metric.Float64HistogramOption + + delegate atomic.Value //metric.Float64Histogram +} + +var _ metric.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Histogram).Record(ctx, x, opts...) + } +} + +type siCounter struct { + embedded.Int64Counter + + name string + opts []metric.Int64CounterOption + + delegate atomic.Value //metric.Int64Counter +} + +var _ metric.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Counter).Add(ctx, x, opts...) + } +} + +type siUpDownCounter struct { + embedded.Int64UpDownCounter + + name string + opts []metric.Int64UpDownCounterOption + + delegate atomic.Value //metric.Int64UpDownCounter +} + +var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) 
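+ // If Load had returned nil (no SDK installed yet), the measurement
+ // would simply be dropped: only instrument creation is replayed on
+ // delegation, not previously recorded data points.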
+ }
+}
+
+type siHistogram struct {
+ embedded.Int64Histogram
+
+ name string
+ opts []metric.Int64HistogramOption
+
+ delegate atomic.Value //metric.Int64Histogram
+}
+
+var _ metric.Int64Histogram = (*siHistogram)(nil)
+
+func (i *siHistogram) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64Histogram(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
new file mode 100644
index 00000000..c6f305a2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "log"
+ "os"
+ "sync/atomic"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/stdr"
+)
+
+// globalLogger is the logging interface used within the otel api and sdk to provide details of the internals.
+//
+// The default logger uses stdr which is backed by the standard `log.Logger`
+// interface. This logger will only show messages at the Error Level.
+var globalLogger atomic.Pointer[logr.Logger]
+
+func init() {
+ SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+}
+
+// SetLogger overrides the globalLogger with l.
+//
+// To see Warn messages use a logger with `l.V(1).Enabled() == true`.
+// To see Info messages use a logger with `l.V(4).Enabled() == true`.
+// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
+func SetLogger(l logr.Logger) {
+ globalLogger.Store(&l)
+}
+
+func getLogger() logr.Logger {
+ return *globalLogger.Load()
+}
+
+// Info prints messages about the general state of the API or SDK.
+// This should usually be less than 5 messages a minute.
+func Info(msg string, keysAndValues ...interface{}) {
+ getLogger().V(4).Info(msg, keysAndValues...)
+}
+
+// Error prints messages about exceptional states of the API or SDK.
+func Error(err error, msg string, keysAndValues ...interface{}) {
+ getLogger().Error(err, msg, keysAndValues...)
+}
+
+// Debug prints messages about all internal changes in the API or SDK.
+func Debug(msg string, keysAndValues ...interface{}) {
+ getLogger().V(8).Info(msg, keysAndValues...)
+}
+
+// Warn prints messages about warnings in the API or SDK.
+// Not an error but is likely more important than an informational event.
+func Warn(msg string, keysAndValues ...interface{}) {
+ getLogger().V(1).Info(msg, keysAndValues...)
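+ // With the stdr logger installed by init above, V(1) output is
+ // discarded by default; raising verbosity (e.g. stdr.SetVerbosity(1))
+ // makes these Warn messages visible.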
+} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go new file mode 100644 index 00000000..0097db47 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -0,0 +1,354 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "container/list" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. +// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + embedded.MeterProvider + + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + embedded.Meter + + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments []delegatedInstrument + + registry list.List + + delegate atomic.Value // metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + meter := provider.Meter(m.name, m.opts...) 
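+ // The real Meter is created exactly once from the installed provider;
+ // every placeholder instrument and callback captured below is then
+ // replayed against it.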
+ m.delegate.Store(meter) + + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + for e := m.registry.Front(); e != nil; e = e.Next() { + r := e.Value.(*registration) + r.setDelegate(meter) + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64UpDownCounter(name, options...) 
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &sfUpDownCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
+}
+
+func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64Histogram(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &sfHistogram{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
+}
+
+func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64ObservableCounter(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &afCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
+}
+
+func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64ObservableUpDownCounter(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &afUpDownCounter{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
+}
+
+func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ return del.Float64ObservableGauge(name, options...)
+ }
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ i := &afGauge{name: name, opts: options}
+ m.instruments = append(m.instruments, i)
+ return i, nil
+}
+
+// RegisterCallback captures the function that will be called during Collect.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
+ if del, ok := m.delegate.Load().(metric.Meter); ok {
+ insts = unwrapInstruments(insts)
+ return del.RegisterCallback(f, insts...)
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ reg := &registration{instruments: insts, function: f}
+ e := m.registry.PushBack(reg)
+ reg.unreg = func() error {
+ m.mtx.Lock()
+ _ = m.registry.Remove(e)
+ m.mtx.Unlock()
+ return nil
+ }
+ return reg, nil
+}
+
+type wrapped interface {
+ unwrap() metric.Observable
+}
+
+func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
+ out := make([]metric.Observable, 0, len(instruments))
+
+ for _, inst := range instruments {
+ if in, ok := inst.(wrapped); ok {
+ out = append(out, in.unwrap())
+ } else {
+ out = append(out, inst)
+ }
+ }
+
+ return out
+}
+
+type registration struct {
+ embedded.Registration
+
+ instruments []metric.Observable
+ function metric.Callback
+
+ unreg func() error
+ unregMu sync.Mutex
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
+ insts := unwrapInstruments(c.instruments)
+
+ c.unregMu.Lock()
+ defer c.unregMu.Unlock()
+
+ if c.unreg == nil {
+ // Unregister already called.
+ return
+ }
+
+ reg, err := m.RegisterCallback(c.function, insts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ }
+
+ c.unreg = reg.Unregister
+}
+
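+// A minimal sketch of the lifecycle managed here, driven through the public
+// metric API (the instrument name and queueDepth helper are illustrative):
+//
+//  gauge, _ := meter.Float64ObservableGauge("queue.depth")
+//  reg, _ := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
+//    o.ObserveFloat64(gauge, queueDepth())
+//    return nil
+//  }, gauge)
+//  _ = reg.Unregister() // safe to call again; subsequent calls return nil
+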
+func (c *registration) Unregister() error {
+ c.unregMu.Lock()
+ defer c.unregMu.Unlock()
+ if c.unreg == nil {
+ // Unregister already called.
+ return nil
+ }
+
+ var err error
+ err, c.unreg = c.unreg(), nil
+ return err
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 00000000..06bac35c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set, otherwise it defaults to delegating the
+// calls to the default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+ mtx sync.Mutex
+ once sync.Once
+ delegate propagation.TextMapPropagator
+ noop propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+ return &textMapPropagator{
+ noop: propagation.NewCompositeTextMapPropagator(),
+ }
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once; all subsequent calls
+// perform no delegation.
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+ if delegate == nil {
+ return
+ }
+
+ p.mtx.Lock()
+ p.once.Do(func() { p.delegate = delegate })
+ p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+ if p.delegate != nil {
+ return p.delegate
+ }
+ return p.noop
+}
+
+// Inject sets cross-cutting concerns from the Context into the carrier.
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+ p.effectiveDelegate().Inject(ctx, carrier)
+}
+
+// Extract reads cross-cutting concerns from the carrier into a Context.
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
+ return p.effectiveDelegate().Extract(ctx, carrier)
+}
+
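+// A short sketch of the delegation above, driven through the public otel
+// package (the HTTP request and context are illustrative):
+//
+//  // Before a delegate is set, Inject and Extract are no-ops.
+//  otel.SetTextMapPropagator(propagation.TraceContext{})
+//  // The same global propagator now writes W3C trace-context headers.
+//  otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(req.Header))
+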
+// Fields returns the keys whose values are set with Inject.
+func (p *textMapPropagator) Fields() []string {
+ return p.effectiveDelegate().Fields()
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
new file mode 100644
index 00000000..7985005b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type (
+ tracerProviderHolder struct {
+ tp trace.TracerProvider
+ }
+
+ propagatorsHolder struct {
+ tm propagation.TextMapPropagator
+ }
+
+ meterProviderHolder struct {
+ mp metric.MeterProvider
+ }
+)
+
+var (
+ globalTracer = defaultTracerValue()
+ globalPropagators = defaultPropagatorsValue()
+ globalMeterProvider = defaultMeterProvider()
+
+ delegateTraceOnce sync.Once
+ delegateTextMapPropagatorOnce sync.Once
+ delegateMeterOnce sync.Once
+)
+
+// TracerProvider is the internal implementation for global.TracerProvider.
+func TracerProvider() trace.TracerProvider {
+ return globalTracer.Load().(tracerProviderHolder).tp
+}
+
+// SetTracerProvider is the internal implementation for global.SetTracerProvider.
+func SetTracerProvider(tp trace.TracerProvider) {
+ current := TracerProvider()
+
+ if _, cOk := current.(*tracerProvider); cOk {
+ if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
+ // Do not assign the default delegating TracerProvider to delegate
+ // to itself.
+ Error(
+ errors.New("no delegate configured in tracer provider"),
+ "Setting tracer provider to its current value. No delegate will be configured",
+ )
+ return
+ }
+ }
+
+ delegateTraceOnce.Do(func() {
+ if def, ok := current.(*tracerProvider); ok {
+ def.setDelegate(tp)
+ }
+ })
+ globalTracer.Store(tracerProviderHolder{tp: tp})
+}
+
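+// Typical wiring, sketched with an SDK tracer provider (sdktrace stands for
+// go.opentelemetry.io/otel/sdk/trace; names are illustrative):
+//
+//  tp := sdktrace.NewTracerProvider()
+//  otel.SetTracerProvider(tp) // replays pending global tracers onto tp
+//  defer func() { _ = tp.Shutdown(context.Background()) }()
+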
+// TextMapPropagator is the internal implementation for global.TextMapPropagator.
+func TextMapPropagator() propagation.TextMapPropagator {
+ return globalPropagators.Load().(propagatorsHolder).tm
+}
+
+// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
+func SetTextMapPropagator(p propagation.TextMapPropagator) {
+ current := TextMapPropagator()
+
+ if _, cOk := current.(*textMapPropagator); cOk {
+ if _, pOk := p.(*textMapPropagator); pOk && current == p {
+ // Do not assign the default delegating TextMapPropagator to
+ // delegate to itself.
+ Error(
+ errors.New("no delegate configured in text map propagator"),
+ "Setting text map propagator to its current value. No delegate will be configured",
+ )
+ return
+ }
+ }
+
+ // For the textMapPropagator already returned by TextMapPropagator
+ // delegate to p.
+ delegateTextMapPropagatorOnce.Do(func() {
+ if def, ok := current.(*textMapPropagator); ok {
+ def.SetDelegate(p)
+ }
+ })
+ // Return p when subsequent calls to TextMapPropagator are made.
+ globalPropagators.Store(propagatorsHolder{tm: p})
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+ return globalMeterProvider.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+ current := MeterProvider()
+ if _, cOk := current.(*meterProvider); cOk {
+ if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+ // Do not assign the default delegating MeterProvider to delegate
+ // to itself.
+ Error(
+ errors.New("no delegate configured in meter provider"),
+ "Setting meter provider to its current value. No delegate will be configured",
+ )
+ return
+ }
+ }
+
+ delegateMeterOnce.Do(func() {
+ if def, ok := current.(*meterProvider); ok {
+ def.setDelegate(mp)
+ }
+ })
+ globalMeterProvider.Store(meterProviderHolder{mp: mp})
+}
+
+func defaultTracerValue() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(tracerProviderHolder{tp: &tracerProvider{}})
+ return v
+}
+
+func defaultPropagatorsValue() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(propagatorsHolder{tm: newTextMapPropagator()})
+ return v
+}
+
+func defaultMeterProvider() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(meterProviderHolder{mp: &meterProvider{}})
+ return v
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
new file mode 100644
index 00000000..5f008d09
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -0,0 +1,192 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK-defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior. Meaning, the Span
+remains a no-op Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
+*/ + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// tracerProvider is a placeholder for a configured SDK TracerProvider. +// +// All TracerProvider functionality is forwarded to a delegate once +// configured. +type tracerProvider struct { + mtx sync.Mutex + tracers map[il]*tracer + delegate trace.TracerProvider +} + +// Compile-time guarantee that tracerProvider implements the TracerProvider +// interface. +var _ trace.TracerProvider = &tracerProvider{} + +// setDelegate configures p to delegate all TracerProvider functionality to +// provider. +// +// All Tracers provided prior to this function call are switched out to be +// Tracers provided by provider. +// +// It is guaranteed by the caller that this happens only once. +func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.tracers) == 0 { + return + } + + for _, t := range p.tracers { + t.setDelegate(provider) + } + + p.tracers = nil +} + +// Tracer implements TracerProvider. +func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Tracer(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. + + c := trace.NewTracerConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.tracers == nil { + p.tracers = make(map[il]*tracer) + } + + if val, ok := p.tracers[key]; ok { + return val + } + + t := &tracer{name: name, opts: opts, provider: p} + p.tracers[key] = t + return t +} + +type il struct { + name string + version string +} + +// tracer is a placeholder for a trace.Tracer. +// +// All Tracer functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopTracer. +type tracer struct { + name string + opts []trace.TracerOption + provider *tracerProvider + + delegate atomic.Value +} + +// Compile-time guarantee that tracer implements the trace.Tracer interface. +var _ trace.Tracer = &tracer{} + +// setDelegate configures t to delegate all Tracer functionality to Tracers +// created by provider. +// +// All subsequent calls to the Tracer methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (t *tracer) setDelegate(provider trace.TracerProvider) { + t.delegate.Store(provider.Tracer(t.name, t.opts...)) +} + +// Start implements trace.Tracer by forwarding the call to t.delegate if +// set, otherwise it forwards the call to a NoopTracer. +func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) + } + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + sc trace.SpanContext + tracer *tracer +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. 
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 00000000..e07e7940 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func RawToBool(r uint64) bool { + return r != 0 +} + +func Int64ToRaw(i int64) uint64 { + return uint64(i) +} + +func RawToInt64(r uint64) int64 { + return int64(r) +} + +func Float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func RawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} + +func RawPtrToFloat64Ptr(r *uint64) *float64 { + return (*float64)(unsafe.Pointer(r)) +} + +func RawPtrToInt64Ptr(r *uint64) *int64 { + return (*int64)(unsafe.Pointer(r)) +} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go new file mode 100644 index 00000000..c4f8acd5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "github.com/go-logr/logr" + + "go.opentelemetry.io/otel/internal/global" +) + +// SetLogger configures the logger used internally to opentelemetry. 
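+//
+// For example, to surface debug-level internals through the same stdr
+// adapter used for the default logger (the prefix is illustrative):
+//
+//  stdr.SetVerbosity(8)
+//  otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))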
+func SetLogger(logger logr.Logger) {
+ global.SetLogger(logger)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
new file mode 100644
index 00000000..f9551719
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+)
+
+// Meter returns a Meter from the global MeterProvider. The name must be the
+// name of the library providing instrumentation. This name may be the same as
+// the instrumented code only if that code provides built-in instrumentation.
+// If the name is empty, then an implementation-defined default name will be
+// used instead.
+//
+// If this is called before a global MeterProvider is registered the returned
+// Meter will be a No-op implementation of a Meter. When a global MeterProvider
+// is registered for the first time, the returned Meter, and all the
+// instruments it has created or will create, are recreated automatically from
+// the new MeterProvider.
+//
+// This is short for GetMeterProvider().Meter(name).
+func Meter(name string, opts ...metric.MeterOption) metric.Meter {
+ return GetMeterProvider().Meter(name, opts...)
+}
+
+// GetMeterProvider returns the registered global meter provider.
+//
+// If no global MeterProvider has been registered, a No-op MeterProvider
+// implementation is returned. When a global MeterProvider is registered for
+// the first time, the returned MeterProvider, and all the Meters it has
+// created or will create, are recreated automatically from the new
+// MeterProvider.
+func GetMeterProvider() metric.MeterProvider {
+ return global.MeterProvider()
+}
+
+// SetMeterProvider registers mp as the global MeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+ global.SetMeterProvider(mp)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
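
A minimal sketch of how the metric API vendored below is consumed by application code (the meter name and instrument are illustrative, not code added by this change):

 meter := otel.Meter("github.com/containers/prometheus-podman-exporter")
 scrapes, err := meter.Int64Counter("exporter.scrapes", metric.WithDescription("Completed scrapes."))
 if err != nil {
  otel.Handle(err)
 }
 scrapes.Add(ctx, 1)
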
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
new file mode 100644
index 00000000..072baa8e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -0,0 +1,271 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Float64Observable interface {
+ Observable
+
+ float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64ObservableCounter
+
+ Float64Observable
+}
+
+// Float64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record float64 values.
+type Float64ObservableCounterConfig struct {
+ description string
+ unit string
+ callbacks []Float64Callback
+}
+
+// NewFloat64ObservableCounterConfig returns a new
+// [Float64ObservableCounterConfig] with all opts applied.
+func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
+ var config Float64ObservableCounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64ObservableCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
+ return c.callbacks
+}
+
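+// A minimal sketch of how this config is populated in practice, through a
+// Meter rather than by constructing it directly (the instrument name and
+// uptimeSeconds helper are illustrative):
+//
+//  _, err := meter.Float64ObservableCounter(
+//    "process.uptime",
+//    WithUnit("s"),
+//    WithFloat64Callback(func(_ context.Context, o Float64Observer) error {
+//      o.Observe(uptimeSeconds())
+//      return nil
+//    }),
+//  )
+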
+// Float64ObservableCounterOption applies options to a
+// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableCounterOption.
+type Float64ObservableCounterOption interface {
+ applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
+}
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableUpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64ObservableUpDownCounter
+
+ Float64Observable
+}
+
+// Float64ObservableUpDownCounterConfig contains options for asynchronous
+// counter instruments that record float64 values.
+type Float64ObservableUpDownCounterConfig struct {
+ description string
+ unit string
+ callbacks []Float64Callback
+}
+
+// NewFloat64ObservableUpDownCounterConfig returns a new
+// [Float64ObservableUpDownCounterConfig] with all opts applied.
+func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig {
+ var config Float64ObservableUpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64ObservableUpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableUpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableUpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
+ return c.callbacks
+}
+
+// Float64ObservableUpDownCounterOption applies options to a
+// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableUpDownCounterOption.
+type Float64ObservableUpDownCounterOption interface {
+ applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
+}
+
+// Float64ObservableGauge is an instrument used to asynchronously record
+// instantaneous float64 measurements once per collection cycle. Observations
+// are only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableGauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64ObservableGauge
+
+ Float64Observable
+}
+
+// Float64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record float64 values.
+type Float64ObservableGaugeConfig struct {
+ description string
+ unit string
+ callbacks []Float64Callback
+}
+
+// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
+// with all opts applied.
+func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig { + var config Float64ObservableGaugeConfig + for _, o := range opts { + config = o.applyFloat64ObservableGauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableGaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableGaugeConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableGaugeOption applies options to a +// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableGaugeOption. +type Float64ObservableGaugeOption interface { + applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig +} + +// Float64Observer is a recorder of float64 measurements. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Observer + + // Observe records the float64 value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Observe(value float64, options ...ObserveOption) +} + +// Float64Callback is a function registered with a Meter that makes +// observations for a Float64Observable instrument it is registered with. +// Calls to the Float64Observer record measurement values for the +// Float64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Float64Callbacks. That is, it should not report measurements with the same +// attributes as another Float64Callback also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Float64Callback func(context.Context, Float64Observer) error + +// Float64ObservableOption applies options to float64 Observer instruments. +type Float64ObservableOption interface { + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +type float64CallbackOpt struct { + cback Float64Callback +} + +func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +// WithFloat64Callback adds the callback to be called for an instrument.
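+//
+// Illustrative usage (an editor's sketch, not part of the upstream vendored
+// file; the meter value and the readUtilization helper are assumptions):
+//
+//	gauge, err := meter.Float64ObservableGauge(
+//		"queue.utilization",
+//		metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
+//			o.Observe(readUtilization()) // hypothetical helper returning float64
+//			return nil
+//		}),
+//	)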
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption { + return float64CallbackOpt{callback} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go new file mode 100644 index 00000000..9bd6ebf0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -0,0 +1,269 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Int64Observable describes a set of instruments used asynchronously to record +// int64 measurements once per collection cycle. Observations of these +// instruments are only made within a callback. +// +// Warning: Methods may be added to this interface in minor releases. +type Int64Observable interface { + Observable + + int64Observable() +} + +// Int64ObservableCounter is an instrument used to asynchronously record +// increasing int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableCounter + + Int64Observable +} + +// Int64ObservableCounterConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableCounterConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig] +// with all opts applied. +func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig { + var config Int64ObservableCounterConfig + for _, o := range opts { + config = o.applyInt64ObservableCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableCounterOption applies options to a +// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableCounterOption.
+type Int64ObservableCounterOption interface { + applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig +} + +// Int64ObservableUpDownCounter is an instrument used to asynchronously record +// int64 measurements once per collection cycle. Observations are only made +// within a callback for this instrument. The value observed is assumed to +// be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableUpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableUpDownCounter + + Int64Observable +} + +// Int64ObservableUpDownCounterConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableUpDownCounterConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableUpDownCounterConfig returns a new +// [Int64ObservableUpDownCounterConfig] with all opts applied. +func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { + var config Int64ObservableUpDownCounterConfig + for _, o := range opts { + config = o.applyInt64ObservableUpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableUpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableUpDownCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableUpDownCounterOption applies options to a +// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableUpDownCounterOption. +type Int64ObservableUpDownCounterOption interface { + applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig +} + +// Int64ObservableGauge is an instrument used to asynchronously record +// instantaneous int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableGauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableGauge + + Int64Observable +} + +// Int64ObservableGaugeConfig contains options for asynchronous gauge +// instruments that record int64 values. +type Int64ObservableGaugeConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig] +// with all opts applied.
+func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig { + var config Int64ObservableGaugeConfig + for _, o := range opts { + config = o.applyInt64ObservableGauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableGaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableGaugeConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableGaugeOption applies options to a +// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableGaugeOption. +type Int64ObservableGaugeOption interface { + applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig +} + +// Int64Observer is a recorder of int64 measurements. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Observer + + // Observe records the int64 value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Observe(value int64, options ...ObserveOption) +} + +// Int64Callback is a function registered with a Meter that makes observations +// for an Int64Observable instrument it is registered with. Calls to the +// Int64Observer record measurement values for the Int64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Int64Callbacks. That is, it should not report measurements with the same +// attributes as another Int64Callback also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Int64Callback func(context.Context, Int64Observer) error + +// Int64ObservableOption applies options to int64 Observer instruments. +type Int64ObservableOption interface { + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption +} + +type int64CallbackOpt struct { + cback Int64Callback +} + +func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +// WithInt64Callback adds the callback to be called for an instrument.
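+//
+// Illustrative usage (an editor's sketch, not part of the upstream vendored
+// file; the meter value and the served counter are assumptions):
+//
+//	var served atomic.Int64 // incremented elsewhere by the serving path
+//	_, err := meter.Int64ObservableCounter(
+//		"requests.served",
+//		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
+//			o.Observe(served.Load()) // report the cumulative total
+//			return nil
+//		}),
+//	)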
+func WithInt64Callback(callback Int64Callback) Int64ObservableOption { + return int64CallbackOpt{callback} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go new file mode 100644 index 00000000..778ad2d7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// MeterConfig contains options for Meters. +type MeterConfig struct { + instrumentationVersion string + schemaURL string + attrs attribute.Set + + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation. +func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + +// SchemaURL is the schema_url of the library providing instrumentation. +func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +} + +// MeterOption is an interface for applying Meter options. +type MeterOption interface { + // applyMeter is used to set a MeterOption value of a MeterConfig. + applyMeter(MeterConfig) MeterConfig +} + +// NewMeterConfig creates a new MeterConfig and applies +// all the given options. +func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. 
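+//
+// For example (an editor's sketch; the provider value, name, and schema URL
+// are assumptions, not part of the upstream file):
+//
+//	meter := provider.Meter(
+//		"github.com/example/instrumentedpkg",
+//		metric.WithInstrumentationVersion("v0.1.0"),
+//		metric.WithSchemaURL("https://opentelemetry.io/schemas/1.21.0"),
+//	)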
+func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 00000000..ae24e448 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,170 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package metric provides the OpenTelemetry API used to measure metrics about +source code operation. + +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official +OpenTelemetry implementation of this API. + +All measurements made with this package are made via instruments. These +instruments are created by a [Meter] which itself is created by a +[MeterProvider]. Applications need to accept a [MeterProvider] implementation +as a starting point when instrumenting. This can be done directly, or by using +the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an +appropriately named [Meter] from the accepted [MeterProvider], instrumentation +can then be built from the [Meter]'s instruments. + +# Instruments + +Each instrument is designed to make measurements of a particular type. Broadly, +all instruments fall into two overlapping logical categories: asynchronous or +synchronous, and int64 or float64. + +All synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are used to measure the operation and performance of source +code during the source code execution. These instruments only make measurements +when the source code they instrument is run. + +All asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) are used to measure metrics outside of the execution +of source code. They are said to make "observations" via a callback function +called once every measurement collection cycle. + +Each instrument is also grouped by the value type it measures. Either int64 or +float64. The value being measured will dictate which instrument in these +categories to use. + +Outside of these two broad categories, instruments are described by the +function they are designed to serve. All Counters ([Int64Counter], +[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are +designed to measure values that never decrease in value, but instead only +incrementally increase in value. UpDownCounters ([Int64UpDownCounter], +[Float64UpDownCounter], [Int64ObservableUpDownCounter], and +[Float64ObservableUpDownCounter]) on the other hand, are designed to measure +values that can increase and decrease. 
When more information needs to be +conveyed about all the synchronous measurements made during a collection cycle, +a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally, +when just the most recent measurement needs to be conveyed about an +asynchronous measurement, a Gauge ([Int64ObservableGauge] and +[Float64ObservableGauge]) should be used. + +See the [OpenTelemetry documentation] for more information about instruments +and their intended use. + +# Measurements + +Measurements are made by recording values and information about the values with +an instrument. How these measurements are recorded depends on the instrument. + +Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are recorded using the instrument methods directly. All +counter instruments have an Add method that is used to measure an increment +value, and all histogram instruments have a Record method to measure a data +point. + +Asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) record measurements within a callback function. The +callback is registered with the Meter which ensures the callback is called once +per collection cycle. A callback can be registered in two ways: during the +instrument's creation using an option, or later using the RegisterCallback +method of the [Meter] that created the instrument. + +If the following criteria are met, an option ([WithInt64Callback] or +[WithFloat64Callback]) can be used during the asynchronous instrument's +creation to register a callback ([Int64Callback] or [Float64Callback], +respectively): + + - The measurement process is known when the instrument is created + - Only that instrument will make a measurement within the callback + - The callback never needs to be unregistered + +If the criteria are not met, use the RegisterCallback method of the [Meter] that +created the instrument to register a [Callback]. + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/metric/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/metric/embedded" + + type MeterProvider struct { + embedded.MeterProvider + // ... + } + +If an author wants the default behavior of their implementations to be a panic, +they need to embed the API interface directly.
+ + import "go.opentelemetry.io/otel/metric" + + type MeterProvider struct { + metric.MeterProvider + // ... + } + +This is not a recommended behavior as it could lead to publishing packages that +contain runtime panics when users update other packages that use newer versions +of [go.opentelemetry.io/otel/metric]. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/metric/noop]: + + import "go.opentelemetry.io/otel/metric/noop" + + type MeterProvider struct { + noop.MeterProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. + +[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ +[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider +*/ +package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go new file mode 100644 index 00000000..ae0bdbd2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -0,0 +1,234 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// metric API]. +// +// Implementers of the [OpenTelemetry metric API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry metric API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric +package embedded // import "go.opentelemetry.io/otel/metric/embedded" + +// MeterProvider is embedded in +// [go.opentelemetry.io/otel/metric.MeterProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type MeterProvider interface{ meterProvider() } + +// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Meter interface{ meter() } + +// Float64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Float64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Observer] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Observer interface{ float64Observer() } + +// Int64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Int64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Observer interface{ int64Observer() } + +// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Observer] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Observer interface{ observer() } + +// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Registration] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Registration] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Registration interface{ registration() } + +// Float64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Float64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Counter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Counter interface{ float64Counter() } + +// Float64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Float64Histogram]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Histogram interface{ float64Histogram() } + +// Float64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableCounter interface{ float64ObservableCounter() } + +// Float64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableGauge interface{ float64ObservableGauge() } + +// Float64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } + +// Float64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Float64UpDownCounter interface{ float64UpDownCounter() } + +// Int64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Int64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Counter interface{ int64Counter() } + +// Int64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Int64Histogram]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Histogram interface{ int64Histogram() } + +// Int64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableCounter interface{ int64ObservableCounter() } + +// Int64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Int64ObservableGauge interface{ int64ObservableGauge() } + +// Int64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if +// you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } + +// Int64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go new file mode 100644 index 00000000..cdca0005 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -0,0 +1,334 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// Observable is used as a grouping mechanism for all instruments that are +// updated within a Callback. +type Observable interface { + observable() +} + +// InstrumentOption applies options to all instruments. +type InstrumentOption interface { + Int64CounterOption + Int64UpDownCounterOption + Int64HistogramOption + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption + + Float64CounterOption + Float64UpDownCounterOption + Float64HistogramOption + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +type descOpt string + +func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. 
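+//
+// For example (an editor's sketch; meter and the instrument name are
+// assumptions, not part of the upstream file):
+//
+//	reqs, err := meter.Int64Counter(
+//		"requests.total",
+//		metric.WithDescription("Total number of requests handled."),
+//	)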
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) } + +type unitOpt string + +func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.unit = string(o) + return c +} + +// WithUnit sets the instrument unit. +// +// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. +func WithUnit(u string) InstrumentOption { return unitOpt(u) } + +// AddOption applies options to an addition measurement. See +// [MeasurementOption] for other options that can be used as an AddOption. +type AddOption interface { + applyAdd(AddConfig) AddConfig +} + +// AddConfig contains options for an addition measurement. +type AddConfig struct { + attrs attribute.Set +} + +// NewAddConfig returns a new [AddConfig] with all opts applied. +func NewAddConfig(opts []AddOption) AddConfig { + config := AddConfig{attrs: *attribute.EmptySet()} + for _, o := range opts { + config = o.applyAdd(config) + } + return config +} + +// Attributes returns the configured attribute set. +func (c AddConfig) Attributes() attribute.Set { + return c.attrs +} + +// RecordOption applies options to a recorded measurement. See +// [MeasurementOption] for other options that can be used as a RecordOption. +type RecordOption interface { + applyRecord(RecordConfig) RecordConfig +} + +// RecordConfig contains options for a recorded measurement. +type RecordConfig struct { + attrs attribute.Set +} + +// NewRecordConfig returns a new [RecordConfig] with all opts applied. +func NewRecordConfig(opts []RecordOption) RecordConfig { + config := RecordConfig{attrs: *attribute.EmptySet()} + for _, o := range opts { + config = o.applyRecord(config) + } + return config +} + +// Attributes returns the configured attribute set. +func (c RecordConfig) Attributes() attribute.Set { + return c.attrs +} + +// ObserveOption applies options to an observed measurement.
See +// [MeasurementOption] for other options that can be used as an ObserveOption. +type ObserveOption interface { + applyObserve(ObserveConfig) ObserveConfig +} + +// ObserveConfig contains options for an observed measurement. +type ObserveConfig struct { + attrs attribute.Set +} + +// NewObserveConfig returns a new [ObserveConfig] with all opts applied. +func NewObserveConfig(opts []ObserveOption) ObserveConfig { + config := ObserveConfig{attrs: *attribute.EmptySet()} + for _, o := range opts { + config = o.applyObserve(config) + } + return config +} + +// Attributes returns the configured attribute set. +func (c ObserveConfig) Attributes() attribute.Set { + return c.attrs +} + +// MeasurementOption applies options to all instrument measurements. +type MeasurementOption interface { + AddOption + RecordOption + ObserveOption +} + +type attrOpt struct { + set attribute.Set +} + +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. +func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) +} + +func (o attrOpt) applyAdd(c AddConfig) AddConfig { + switch { + case o.set.Len() == 0: + case c.attrs.Len() == 0: + c.attrs = o.set + default: + c.attrs = mergeSets(c.attrs, o.set) + } + return c +} + +func (o attrOpt) applyRecord(c RecordConfig) RecordConfig { + switch { + case o.set.Len() == 0: + case c.attrs.Len() == 0: + c.attrs = o.set + default: + c.attrs = mergeSets(c.attrs, o.set) + } + return c +} + +func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig { + switch { + case o.set.Len() == 0: + case c.attrs.Len() == 0: + c.attrs = o.set + default: + c.attrs = mergeSets(c.attrs, o.set) + } + return c +} + +// WithAttributeSet sets the attribute Set associated with a +// measurement. +// +// If multiple WithAttributeSet or WithAttributes options are passed, the +// attributes will be merged together in the order they are passed. Attributes +// with duplicate keys will use the last value passed. +func WithAttributeSet(attributes attribute.Set) MeasurementOption { + return attrOpt{set: attributes} +} + +// WithAttributes converts attributes into an attribute Set and sets the Set to +// be associated with a measurement. This is shorthand for: +// +// cp := make([]attribute.KeyValue, len(attributes)) +// copy(cp, attributes) +// WithAttributeSet(attribute.NewSet(cp...)) +// +// [attribute.NewSet] may modify the passed attributes so this will make a copy +// of attributes before creating a set in order to ensure this function is +// concurrent safe. This makes this option function less optimized in +// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be +// preferred for performance sensitive code. +// +// See [WithAttributeSet] for information about how multiple WithAttributes are +// merged.
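+//
+// For example (an editor's sketch; counter and ctx are assumed to exist in
+// the caller's scope):
+//
+//	counter.Add(ctx, 1,
+//		metric.WithAttributes(attribute.String("http.method", "GET")),
+//	)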
+func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption { + cp := make([]attribute.KeyValue, len(attributes)) + copy(cp, attributes) + return attrOpt{set: attribute.NewSet(cp...)} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go new file mode 100644 index 00000000..2520bc74 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -0,0 +1,212 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// MeterProvider provides access to named Meter instances, for instrumenting +// an application or package. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type MeterProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.MeterProvider + + // Meter returns a new Meter with the provided name and configuration. + // + // A Meter should be scoped at most to a single package. The name needs to + // be unique so it does not collide with other names used by + // an application or by other applications. To achieve this, the import + // path of the instrumentation package is recommended to be used as the + // name. + // + // If the name is empty, then an implementation defined default name will + // be used instead. + Meter(name string, opts ...MeterOption) Meter +} + +// Meter provides access to instrument instances for recording metrics. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Meter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Meter + + // Int64Counter returns a new Int64Counter instrument identified by name + // and configured with options. The instrument is used to synchronously + // record increasing int64 measurements during a computational operation. + Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument + // identified by name and configured with options. The instrument is used + // to synchronously record int64 measurements during a computational + // operation.
+ Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by + // name and configured with options. The instrument is used to + // synchronously record the distribution of int64 measurements during a + // computational operation. + Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified + // by name and configured with options. The instrument is used to + // asynchronously record increasing int64 measurements once per + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter + // instrument identified by name and configured with options. The + // instrument is used to asynchronously record int64 measurements once per + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument + // identified by name and configured with options. The instrument is used + // to asynchronously record instantaneous int64 measurements once per + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithInt64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) + + // Float64Counter returns a new Float64Counter instrument identified by + // name and configured with options. The instrument is used to + // synchronously record increasing float64 measurements during a + // computational operation. + Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument + // identified by name and configured with options. The instrument is used + // to synchronously record float64 measurements during a computational + // operation. + Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by + // name and configured with options. The instrument is used to + // synchronously record the distribution of float64 measurements during a + // computational operation.
+ Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64ObservableCounter returns a new Float64ObservableCounter + // instrument identified by name and configured with options. The + // instrument is used to asynchronously record increasing float64 + // measurements once per measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new + // Float64ObservableUpDownCounter instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // float64 measurements once per measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument + // identified by name and configured with options. The instrument is used + // to asynchronously record instantaneous float64 measurements once per + // measurement collection cycle. + // + // Measurements for the returned instrument are made via a callback. Use + // the WithFloat64Callback option to register the callback here, or use the + // RegisterCallback method of this Meter to register one later. See the + // Measurements section of the package documentation for more information. + Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) + + // RegisterCallback registers f to be called during the collection of a + // measurement cycle. + // + // If Unregister of the returned Registration is called, f needs to be + // unregistered and not called during collection. + // + // The instruments f is registered with are the only instruments that f may + // observe values for. + // + // If no instruments are passed, f should not be registered nor called + // during collection. + // + // The function f needs to be concurrent safe. + RegisterCallback(f Callback, instruments ...Observable) (Registration, error) +} + +// Callback is a function registered with a Meter that makes observations for +// the set of instruments it is registered with. The Observer parameter is used +// to record measurement observations for these instruments. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Callbacks. That is, it should not report measurements for an instrument with +// the same attributes as another Callback will report. +// +// The function needs to be concurrent safe. +type Callback func(context.Context, Observer) error + +// Observer records measurements for multiple instruments in a Callback.
+
+// Observer records measurements for multiple instruments in a Callback.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Observer
+
+	// ObserveFloat64 records the float64 value for obsrv.
+	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+	// ObserveInt64 records the int64 value for obsrv.
+	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Registration
+
+	// Unregister removes the callback registration from a Meter.
+	//
+	// This method needs to be idempotent and concurrent safe.
+	Unregister() error
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
new file mode 100644
index 00000000..f0b06372
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -0,0 +1,179 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64CounterConfig contains options for synchronous counter instruments that
+// record float64 values.
+type Float64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
+// applied.
+func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
+	var config Float64CounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64CounterOption applies options to a [Float64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64CounterOption.
+type Float64CounterOption interface {
+	applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64UpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64UpDownCounter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64UpDownCounterConfig contains options for synchronous counter
+// instruments that record float64 values.
+type Float64UpDownCounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
+// with all opts applied.
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
+	var config Float64UpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64UpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64UpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64UpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64UpDownCounterOption applies options to a
+// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
+// can be used as a Float64UpDownCounterOption.
+type Float64UpDownCounterOption interface {
+	applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
+}
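A usage sketch for the synchronous instruments (editorial, not part of the vendored file): a counter is obtained from a Meter and incremented with Add. The otel.Meter lookup, metric.WithDescription, and metric.WithAttributeSet are assumed from instrument options defined elsewhere in this package; the instrument and attribute names are made up.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func countRequest(ctx context.Context) error {
	meter := otel.Meter("example/requests") // scope name is illustrative

	reqs, err := meter.Float64Counter("requests.handled",
		metric.WithDescription("Number of handled requests."),
	)
	if err != nil {
		return err
	}

	// Pre-computing the attribute set and passing it with WithAttributeSet
	// avoids rebuilding it on every Add call, per the comment on Add above.
	set := attribute.NewSet(attribute.String("route", "/items"))
	reqs.Add(ctx, 1, metric.WithAttributeSet(set))
	return nil
}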
+
+// Float64Histogram is an instrument that records a distribution of float64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Histogram interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Histogram
+
+	// Record adds an additional value to the distribution.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr float64, options ...RecordOption)
+}
+
+// Float64HistogramConfig contains options for synchronous histogram
+// instruments that record float64 values.
+type Float64HistogramConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
+// opts applied.
+func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
+	var config Float64HistogramConfig
+	for _, o := range opts {
+		config = o.applyFloat64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64HistogramOption.
+type Float64HistogramOption interface {
+	applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
new file mode 100644
index 00000000..6f508eb6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -0,0 +1,179 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64CounterConfig contains options for synchronous counter instruments that
+// record int64 values.
+type Int64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
+// applied.
+func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
+	var config Int64CounterConfig
+	for _, o := range opts {
+		config = o.applyInt64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Int64CounterOption applies options to a [Int64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64CounterOption.
+type Int64CounterOption interface {
+	applyInt64Counter(Int64CounterConfig) Int64CounterConfig
+}
+
+// Int64UpDownCounter is an instrument that records increasing or decreasing
+// int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64UpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64UpDownCounter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64UpDownCounterConfig contains options for synchronous counter
+// instruments that record int64 values.
+type Int64UpDownCounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
+// all opts applied.
+func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
+	var config Int64UpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64UpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64UpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64UpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig].
+// See [InstrumentOption] for other options that can be used as an
+// Int64UpDownCounterOption.
+type Int64UpDownCounterOption interface {
+	applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
+}
+
+// Int64Histogram is an instrument that records a distribution of int64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Histogram interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Histogram
+
+	// Record adds an additional value to the distribution.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr int64, options ...RecordOption)
+}
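A matching sketch for the histogram interface just defined (editorial, not part of the vendored file): Record feeds individual measurements into the distribution. metric.WithUnit and metric.WithDescription are assumed from instrument options defined elsewhere in this package; the scope and instrument names are made up.

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// timeOperation records how long op takes as one histogram measurement.
func timeOperation(ctx context.Context, op func()) error {
	meter := otel.Meter("example/latency") // scope name is illustrative

	dur, err := meter.Int64Histogram("operation.duration",
		metric.WithUnit("ms"),
		metric.WithDescription("Duration of the operation."),
	)
	if err != nil {
		return err
	}

	start := time.Now()
	op()
	dur.Record(ctx, time.Since(start).Milliseconds())
	return nil
}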
+
+// Int64HistogramConfig contains options for synchronous histogram instruments
+// that record int64 values.
+type Int64HistogramConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
+// applied.
+func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
+	var config Int64HistogramConfig
+	for _, o := range opts {
+		config = o.applyInt64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64HistogramOption.
+type Int64HistogramOption interface {
+	applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
new file mode 100644
index 00000000..d29aaa32
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// GetTextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func GetTextMapPropagator() propagation.TextMapPropagator {
+	return global.TextMapPropagator()
+}
+
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+	global.SetTextMapPropagator(propagator)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 00000000..303cdf1c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+	bStr := baggage.FromContext(ctx).String()
+	if bStr != "" {
+		carrier.Set(baggageHeader, bStr)
+	}
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+	bStr := carrier.Get(baggageHeader)
+	if bStr == "" {
+		return parent
+	}
+
+	bag, err := baggage.Parse(bStr)
+	if err != nil {
+		return parent
+	}
+	return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+	return []string{baggageHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 00000000..c119eb28
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
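A small sketch of the Baggage propagator in isolation (editorial, not part of the vendored file), using the MapCarrier defined in the next hunk. The baggage members are made up and error handling is elided.

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Build baggage and attach it to a context.
	bag, _ := baggage.Parse("user.id=42,tier=gold")
	ctx := baggage.ContextWithBaggage(context.Background(), bag)

	// Inject writes the encoded baggage under the "baggage" key; Extract
	// recovers it on the receiving side.
	carrier := propagation.MapCarrier{}
	propagation.Baggage{}.Inject(ctx, carrier)
	fmt.Println(carrier["baggage"])

	ctx2 := propagation.Baggage{}.Extract(context.Background(), carrier)
	fmt.Println(baggage.FromContext(ctx2).Member("user.id").Value())
}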
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 00000000..c94438f7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Get returns the value associated with the passed key.
+	Get(key string) string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Set stores the key-value pair.
+	Set(key string, value string)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Keys lists the keys stored in this carrier.
+	Keys() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+	return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+	c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+	keys := make([]string, 0, len(c))
+	for k := range c {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+	return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+	http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+	keys := make([]string, 0, len(hc))
+	for k := range hc {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+	Inject(ctx context.Context, carrier TextMapCarrier)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Extract reads cross-cutting concerns from the carrier into a Context.
+	Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Fields returns the keys whose values are set with Inject.
+	Fields() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+	for _, i := range p {
+		i.Inject(ctx, carrier)
+	}
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	for _, i := range p {
+		ctx = i.Extract(ctx, carrier)
+	}
+	return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+	unique := make(map[string]struct{})
+	for _, i := range p {
+		for _, k := range i.Fields() {
+			unique[k] = struct{}{}
+		}
+	}
+
+	fields := make([]string, 0, len(unique))
+	for k := range unique {
+		fields = append(fields, k)
+	}
+	return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagator. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+	return compositeTextMapPropagator(p)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 00000000..902692da
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,159 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"regexp"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	supportedVersion  = 0
+	maxVersion        = 254
+	traceparentHeader = "traceparent"
+	tracestateHeader  = "tracestate"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/)
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var _ TextMapPropagator = TraceContext{}
+var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
+
+// Inject sets the tracecontext from the Context into the carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+	sc := trace.SpanContextFromContext(ctx)
+	if !sc.IsValid() {
+		return
+	}
+
+	if ts := sc.TraceState().String(); ts != "" {
+		carrier.Set(tracestateHeader, ts)
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	flags := sc.TraceFlags() & trace.FlagsSampled
+
+	h := fmt.Sprintf("%.2x-%s-%s-%s",
+		supportedVersion,
+		sc.TraceID(),
+		sc.SpanID(),
+		flags)
+	carrier.Set(traceparentHeader, h)
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	sc := tc.extract(carrier)
+	if !sc.IsValid() {
+		return ctx
+	}
+	return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+	h := carrier.Get(traceparentHeader)
+	if h == "" {
+		return trace.SpanContext{}
+	}
+
+	matches := traceCtxRegExp.FindStringSubmatch(h)
+
+	if len(matches) == 0 {
+		return trace.SpanContext{}
+	}
+
+	if len(matches) < 5 { // four subgroups plus the overall match
+		return trace.SpanContext{}
+	}
+
+	if len(matches[1]) != 2 {
+		return trace.SpanContext{}
+	}
+	ver, err := hex.DecodeString(matches[1])
+	if err != nil {
+		return trace.SpanContext{}
+	}
+	version := int(ver[0])
+	if version > maxVersion {
+		return trace.SpanContext{}
+	}
+
+	if version == 0 && len(matches) != 5 { // four subgroups plus the overall match
+		return trace.SpanContext{}
+	}
+
+	if len(matches[2]) != 32 {
+		return trace.SpanContext{}
+	}
+
+	var scc trace.SpanContextConfig
+
+	scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32])
+	if err != nil {
+		return trace.SpanContext{}
+	}
+
+	if len(matches[3]) != 16 {
+		return trace.SpanContext{}
+	}
+	scc.SpanID, err = trace.SpanIDFromHex(matches[3])
+	if err != nil {
+		return trace.SpanContext{}
+	}
+
+	if len(matches[4]) != 2 {
+		return trace.SpanContext{}
+	}
+	opts, err := hex.DecodeString(matches[4])
+	if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
+		return trace.SpanContext{}
+	}
+	// Clear all flags other than the trace-context supported sampling bit.
+	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+	// Ignore the error returned here. Failure to parse tracestate MUST NOT
+	// affect the parsing of traceparent according to the W3C tracecontext
+	// specification.
+	scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+	scc.Remote = true
+
+	sc := trace.NewSpanContext(scc)
+	if !sc.IsValid() {
+		return trace.SpanContext{}
+	}
+
+	return sc
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (tc TraceContext) Fields() []string {
+	return []string{traceparentHeader, tracestateHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
new file mode 100644
index 00000000..ddff4546
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -0,0 +1 @@
+codespell==2.2.5
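Putting the propagation pieces together (an editorial sketch, not part of the vendored files): TraceContext and Baggage composed into one propagator, installed globally, and applied to HTTP headers through HeaderCarrier. The request URL is illustrative and error handling is elided.

package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Compose the two propagators defined above and install them globally.
	prop := propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	)
	otel.SetTextMapPropagator(prop)

	// Client side: inject the current trace context and baggage into the
	// headers of an outgoing request.
	req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
	prop.Inject(context.Background(), propagation.HeaderCarrier(req.Header))

	// Server side: extract them back out of the incoming headers.
	ctx := prop.Extract(context.Background(), propagation.HeaderCarrier(req.Header))
	_ = ctx
}

Because injection runs in the order the propagators were provided, both the traceparent/tracestate and baggage headers end up on the same carrier.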
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
new file mode 100644
index 00000000..71a1f774
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.17.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
new file mode 100644
index 00000000..679c40c4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+	// identifier for a value. If one is unavailable, a stringified
+	// version of the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or
+	// sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two
+	// different counters starting from `1` one for sent messages and one for
+	// received message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different
+// counters starting from `1` one for sent messages and one for received
+// message.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be
+	// set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
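A sketch of how this attribute is meant to be used (editorial, not part of the vendored files), following the note above: check for an error just before ending the span, the one point where "escaped" is known for certain. ExceptionEventName comes from the exception.go hunk below; trace.Span and trace.WithAttributes come from the trace package, which is not part of this hunk.

package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// endSpanWithError records an escaping exception immediately before ending
// the span.
func endSpanWithError(span trace.Span, err error) {
	if err != nil {
		span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
			semconv.ExceptionEscaped(true),
		))
	}
	span.End()
}

In practice the trace API's Span.RecordError also records an exception event and fills in the exception type and message attributes for you.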
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
new file mode 100644
index 00000000..9b8c559d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
new file mode 100644
index 00000000..d5c4b5c1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// HTTP scheme attributes.
+var (
+	HTTPSchemeHTTP  = HTTPSchemeKey.String("http")
+	HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
new file mode 100644
index 00000000..39a2eab3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
@@ -0,0 +1,2010 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+	// semantic conventions. It represents the array of brand name and version
+	// separated by a space
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.brands`).
+	BrowserBrandsKey = attribute.Key("browser.brands")
+
+	// BrowserPlatformKey is the attribute Key conforming to the
+	// "browser.platform" semantic conventions. It represents the platform on
+	// which the browser is running
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Windows', 'macOS', 'Android'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.platform`).
If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. 
It represents the full user-agent
+// string provided by the browser
+func BrowserUserAgent(val string) attribute.KeyValue {
+	return BrowserUserAgentKey.String(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+	return BrowserLanguageKey.String(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region the resource
+	// is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://intl.cloud.tencent.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the zone
+	// where the resource is running; cloud regions often have multiple,
+	// isolated locations known as zones to increase availability.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. 
It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the zone
+// where the resource is running; cloud regions often have multiple, isolated
+// locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// AWSECSContainerARNKey is the attribute Key conforming to the
+	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+	// Resource Name (ARN) of an [ECS container
+	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+	// AWSECSClusterARNKey is the attribute Key conforming to the
+	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+	// [ECS
+	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+	// AWSECSLaunchtypeKey is the attribute Key conforming to the
+	// "aws.ecs.launchtype" semantic conventions. It represents the [launch
+	// type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+	// for an ECS task.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+	// AWSECSTaskARNKey is the attribute Key conforming to the
+	// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+	// [ECS task
+	// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+	// AWSECSTaskFamilyKey is the attribute Key conforming to the
+	// "aws.ecs.task.family" semantic conventions. It represents the task
+	// definition family this task definition is a member of.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-family'
+	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+	// AWSECSTaskRevisionKey is the attribute Key conforming to the
+	// "aws.ecs.task.revision" semantic conventions. It represents the revision
+	// for this task definition.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). 
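+	//
+	// Example: a sketch of the ECS helpers defined above; the ARN, family,
+	// and revision values are illustrative assumptions:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		AWSECSLaunchtypeFargate,
+	//		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+	//		AWSECSTaskFamily("opentelemetry-family"),
+	//		AWSECSTaskRevision("8"),
+	//	}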
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
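+	//
+	// Example: the log-group and log-stream helpers above are variadic and
+	// yield string-slice attributes; a sketch with assumed names:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		AWSLogGroupNames("/aws/lambda/my-function"),
+	//		AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+	//	}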
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). 
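+
+// Example: a sketch combining the container helpers above; the values are
+// illustrative assumptions, not defaults:
+//
+//	attrs := []attribute.KeyValue{
+//		ContainerName("opentelemetry-autoconf"),
+//		ContainerID("a3bf90e006b2"),
+//		ContainerRuntime("podman"),
+//		ContainerImageName("quay.io/podman/stable"),
+//		ContainerImageTag("v4.9"),
+//	}
+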
+func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. 
It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+	return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+	return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+	return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this
+	// runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the
+	// FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:** The full name `<FUNCAPP>/<FUNCNAME>`, i.e., function app name
+	// followed by a forward slash followed by the function name (this form
+	// can also be seen in the resource JSON for the function).
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider (see also the `faas.id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSIDKey is the attribute Key conforming to the "faas.id" semantic
+	// conventions. It represents the unique ID of the single function that
+	// this runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so consider setting `faas.id` as a span attribute instead.
+	//
+	// The exact value to use for `faas.id` depends on the cloud provider:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	// with the resolved function version, as the same runtime instance may
+	// be invokable with
+	// multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. 
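+
+// Example: a sketch of a serverless resource built from the helpers in this
+// section; the name, version, and memory figure are assumed values:
+//
+//	attrs := []attribute.KeyValue{
+//		FaaSName("my-function"),
+//		FaaSVersion("26"),
+//		FaaSMaxMemory(128),
+//	}
+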
+func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function in MiB. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // Linux systems, the `machine-id` located in `/etc/machine-id` or + // `/var/lib/dbus/machine-id` may be used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID. For Cloud, this + // value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized Linux +// systems, the `machine-id` located in `/etc/machine-id` or +// `/var/lib/dbus/machine-id` may be used. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. 
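+	//
+	// Example: a sketch of the host helpers and architecture enum defined
+	// above, with assumed values:
+	//
+	//	attrs := []attribute.KeyValue{
+	//		HostName("opentelemetry-test"),
+	//		HostArchAMD64,
+	//		HostImageName("CentOS-8-x86_64-1905"),
+	//	}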
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. 
This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. 
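+
+// Example: a sketch locating a container within a pod using the Kubernetes
+// helpers above; the names and UID are illustrative assumptions:
+//
+//	attrs := []attribute.KeyValue{
+//		K8SNamespaceName("default"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//		K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//		K8SContainerName("redis"),
+//		K8SContainerRestartCount(0),
+//	}
+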
+func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. 
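+
+// Example: a sketch tying a pod to its owning workload; in practice only the
+// attributes matching the actual owner kind would be set (values assumed):
+//
+//	attrs := []attribute.KeyValue{
+//		K8SDeploymentName("opentelemetry"),
+//		K8SReplicaSetName("opentelemetry-6d4f8c7b9d"),
+//	}
+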
+func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
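+	//
+	// Example: a sketch of an operating-system resource using the enum and
+	// helpers in this section (values assumed):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		OSTypeLinux,
+	//		OSName("Ubuntu"),
+	//		OSVersion("18.04.1"),
+	//		OSDescription("Ubuntu 18.04.1 LTS"),
+	//	}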
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
+ // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. 
It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. 
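+	//
+	// Example: a sketch of a process resource built from the helpers above;
+	// note that ProcessCommandArgs is variadic (values assumed):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		ProcessPID(1234),
+	//		ProcessExecutableName("otelcol"),
+	//		ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
+	//		ProcessOwner("root"),
+	//	}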
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. 
instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. 
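+	//
+	// Example: a sketch of the service identity helpers above; the
+	// namespace/name/instance triplet must be globally unique (values
+	// assumed):
+	//
+	//	attrs := []attribute.KeyValue{
+	//		ServiceNamespace("Shop"),
+	//		ServiceName("shoppingcart"),
+	//		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+	//		ServiceVersion("2.0.0"),
+	//	}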
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. 
+func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OtelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelScopeNameKey = attribute.Key("otel.scope.name") + + // OtelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OtelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OtelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OtelScopeName(val string) attribute.KeyValue { + return OtelScopeNameKey.String(val) +} + +// OtelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OtelScopeVersion(val string) attribute.KeyValue { + return OtelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OtelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It represents the deprecated, + // use the `otel.scope.name` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelLibraryNameKey = attribute.Key("otel.library.name") + + // OtelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It represents the + // deprecated, use the `otel.scope.version` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OtelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OtelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It represents the deprecated, use +// the `otel.scope.name` attribute. +func OtelLibraryName(val string) attribute.KeyValue { + return OtelLibraryNameKey.String(val) +} + +// OtelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It represents the deprecated, +// use the `otel.scope.version` attribute. 
+func OtelLibraryVersion(val string) attribute.KeyValue { + return OtelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go new file mode 100644 index 00000000..42fc525d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go new file mode 100644 index 00000000..8c4a7299 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -0,0 +1,3375 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. 
It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. +const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the this attribute is used to report the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not + // explicitly disabled via instrumentation configuration.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. 
If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// DBConnectionString returns 
an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. 
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OtelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OtelStatusCodeOk = OtelStatusCodeKey.String("OK") + // The operation contains an error + OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") +) + +// OtelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OtelStatusDescription(val string) attribute.KeyValue { + return OtelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" + // semantic conventions. It represents the execution ID of the current + // function execution. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSExecution returns an attribute KeyValue conforming to the +// "faas.execution" semantic conventions. It represents the execution ID of the +// current function execution. +func FaaSExecution(val string) attribute.KeyValue { + return FaaSExecutionKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. 
In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. 
It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. 
+	//
+	// Type: int
+	// RequirementLevel: Recommended (If defined for the address family and if
+	// different than `net.host.port` and if `net.sock.host.addr` is set.)
+	// Stability: stable
+	// Examples: 35555
+	NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+	// NetHostConnectionTypeKey is the attribute Key conforming to the
+	// "net.host.connection.type" semantic conventions. It represents the
+	// internet connection type currently being used by the host.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'wifi'
+	NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+	// NetHostConnectionSubtypeKey is the attribute Key conforming to the
+	// "net.host.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'LTE'
+	NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+	// NetHostCarrierNameKey is the attribute Key conforming to the
+	// "net.host.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'sprint'
+	NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+	// NetHostCarrierMccKey is the attribute Key conforming to the
+	// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '310'
+	NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+	// NetHostCarrierMncKey is the attribute Key conforming to the
+	// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '001'
+	NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+	// NetHostCarrierIccKey is the attribute Key conforming to the
+	// "net.host.carrier.icc" semantic conventions. It represents the ISO
+	// 3166-1 alpha-2 2-character country code associated with the mobile
+	// carrier network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'DE'
+	NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+	// ip_tcp
+	NetTransportTCP = NetTransportKey.String("ip_tcp")
+	// ip_udp
+	NetTransportUDP = NetTransportKey.String("ip_udp")
+	// Named or anonymous pipe.
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. 
+func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. 
+func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+	// conventions. It represents the current "managed" thread ID (as opposed
+	// to OS thread ID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name"
+	// semantic conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
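A server-side interceptor might combine the end-user and thread attributes defined above. The sketch below assumes the `go.opentelemetry.io/otel/trace` API and an alias `semconv` for this package (version path assumed); the user, role, and thread values are hypothetical:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotateCaller records who is calling and which "managed" thread handles
// the request; user, role, and threadID would come from the auth context and
// the runtime.
func annotateCaller(span trace.Span, user, role string, threadID int) {
	span.SetAttributes(
		semconv.EnduserID(user),
		semconv.EnduserRole(role),
		semconv.ThreadID(threadID), // managed thread ID, not the OS thread ID
		semconv.ThreadName("main"),
	)
}
```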
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+	// CodeFunctionKey is the attribute Key conforming to the "code.function"
+	// semantic conventions. It represents the method or function name, or
+	// equivalent (usually rightmost part of the code unit's name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'serveRequest'
+	CodeFunctionKey = attribute.Key("code.function")
+
+	// CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+	// semantic conventions. It represents the "namespace" within which
+	// `code.function` is defined. Usually the qualified class or module name,
+	// such that `code.namespace` + some separator + `code.function` form a
+	// unique identifier for the code unit.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'com.example.MyHTTPService'
+	CodeNamespaceKey = attribute.Key("code.namespace")
+
+	// CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+	// semantic conventions. It represents the source code file name that
+	// identifies the code unit as uniquely as possible (preferably an absolute
+	// file path).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
+	CodeFilepathKey = attribute.Key("code.filepath")
+
+	// CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+	// semantic conventions. It represents the line number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	CodeLineNumberKey = attribute.Key("code.lineno")
+
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 16
+	CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+	return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+	return CodeColumnKey.Int(val)
+}
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions. It represents the HTTP request method.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	HTTPMethodKey = attribute.Key("http.method")
+
+	// HTTPStatusCodeKey is the attribute Key conforming to the
+	// "http.status_code" semantic conventions. It represents the [HTTP
+	// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If and only if one was
+	// received/sent.)
+	// Stability: stable
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+	// HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+	// semantic conventions. It represents the kind of HTTP protocol used.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: If `net.transport` is not specified, it can be assumed to be
+	// `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
+	// assumed.
+	HTTPFlavorKey = attribute.Key("http.flavor")
+
+	// HTTPUserAgentKey is the attribute Key conforming to the
+	// "http.user_agent" semantic conventions. It represents the value of the
+	// [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+	HTTPUserAgentKey = attribute.Key("http.user_agent")
+
+	// HTTPRequestContentLengthKey is the attribute Key conforming to the
+	// "http.request_content_length" semantic conventions. It represents the
+	// size of the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3495
+	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+	// HTTPResponseContentLengthKey is the attribute Key conforming to the
+	// "http.response_content_length" semantic conventions. It represents the
+	// size of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3495
+	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+	// HTTP/1.0
+	HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+	// HTTP/1.1
+	HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+	// HTTP/2
+	HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+	// HTTP/3
+	HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+	// SPDY protocol
+	HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+	// QUIC protocol
+	HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions. It represents the value of the [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). 
+func HTTPResendCount(val int) attribute.KeyValue {
+	return HTTPResendCountKey.Int(val)
+}
+
+// Semantic Convention for HTTP Server
+const (
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions. It represents the URI scheme identifying the used
+	// protocol.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPTargetKey is the attribute Key conforming to the "http.target"
+	// semantic conventions. It represents the full request target as passed in
+	// an HTTP request line or equivalent.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '/path/12314/?q=ddds'
+	HTTPTargetKey = attribute.Key("http.target")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework). See note below
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's available)
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: 'http.route' MUST NOT be populated when this is not supported by
+	// the HTTP server framework as the route attribute should have
+	// low-cardinality and the URI path can NOT substitute it.
+	HTTPRouteKey = attribute.Key("http.route")
+
+	// HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+	// semantic conventions. It represents the IP address of the original
+	// client behind all proxies, if known (e.g. from
+	// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '83.164.160.102'
+	// Note: This is not necessarily the same as `net.sock.peer.addr`, which
+	// would identify the network-level peer, which may be a proxy.
+	//
+	// This attribute should be set when a source of information different
+	// from the one used for `net.sock.peer.addr` is available even if that
+	// other source just confirms the same value as `net.sock.peer.addr`.
+	// Rationale: For `net.sock.peer.addr`, one typically does not know if it
+	// comes from a proxy, reverse proxy, or the actual client. Setting
+	// `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+	// one is at least somewhat confident that the address is not that of
+	// the closest proxy.
+	HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in an
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+	return HTTPTargetKey.String(val)
+}
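On the server side, the Required keys (`http.method`, `http.scheme`, `http.target`) plus the conditionally required route and status code could be attached as in this sketch, which assumes the `go.opentelemetry.io/otel/trace` API and an assumed `semconv` import path; the route template is hypothetical:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotateServerSpan fills the Required and ConditionallyRequired HTTP server
// attributes documented above.
func annotateServerSpan(span trace.Span, status int) {
	span.SetAttributes(
		semconv.HTTPMethod("GET"),
		semconv.HTTPScheme("https"),
		semconv.HTTPTarget("/users/42?verbose=1"),
		semconv.HTTPRoute("/users/:userID"), // only when the framework matched a route
		semconv.HTTPStatusCode(status),      // only if a response was sent
		semconv.HTTPFlavorHTTP11,
	)
}
```

+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework).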
See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
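The helpers defined so far already cover the common request and response attributes, so a DynamoDB Query client span could be annotated with them directly; a minimal sketch under the same assumed `semconv` import, with hypothetical table and index names:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotateQuerySpan sets the common DynamoDB attributes for a Query call.
func annotateQuerySpan(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(false),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),
	)
}
```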
+	// It represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+	// "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+	// "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+	// AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `LocalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexARN": "string", "IndexName": "string",
+	// "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+	// the value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+	// number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+	return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+	return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
+	// represents the JSON-serialized value of each item in the
+	// `AttributeDefinitions` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in
+	// the `GlobalSecondaryIndexUpdates` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. 
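The GraphQL helpers above compose the same way; a sketch (same assumed `semconv` import and `go.opentelemetry.io/otel/trace` API) of annotating a span for a query execution, with the document sanitized first per the note above:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

// annotateGraphQLSpan records the operation and its (sanitized) document;
// the operation name is hypothetical.
func annotateGraphQLSpan(span trace.Span, sanitizedDoc string) {
	span.SetAttributes(
		semconv.GraphqlOperationName("findBookByID"),
		semconv.GraphqlOperationTypeQuery,
		semconv.GraphqlDocument(sanitizedDoc),
	)
}
```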
It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. It represents the + // kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. 
Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. 
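Editor's note: the helper constructors and enum values defined in this section compose directly into span attributes. A minimal sketch follows (not part of the vendored file; the `semconv` alias and the v1.17.0 import path are assumptions, so adjust them to whichever semconv version this diff actually vendors):

    package example

    import (
        "context"

        semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
        "go.opentelemetry.io/otel/trace"
    )

    // annotateProcessSpan shows how a consumer instrumentation might tag a
    // "process" span with the messaging attributes defined in this section.
    func annotateProcessSpan(ctx context.Context) {
        span := trace.SpanFromContext(ctx)
        span.SetAttributes(
            semconv.MessagingSystem("rabbitmq"),         // required attribute
            semconv.MessagingOperationProcess,           // enum value defined below
            semconv.MessagingDestinationName("MyQueue"),
            semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
        )
    }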
+ MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) 
+ // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka, which are used for grouping alike messages to + // ensure they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions.
It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka, which are used for grouping alike messages to ensure +// they're processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources; resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions.
It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to the consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for a delay message, + // which determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the message group, which is essential for FIFO messages. Messages that + // belong to the same message group are always processed one by one within + // the same consumer group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources; resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. +func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to the consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for a delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the message group, which is essential for FIFO messages.
Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. 
It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) 
+ // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go new file mode 100644 index 00000000..caf7249d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/trace" +) + +// Tracer creates a named tracer that implements Tracer interface. +// If the name is an empty string then provider uses default name. +// +// This is short for GetTracerProvider().Tracer(name, opts...) +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) +} + +// GetTracerProvider returns the registered global trace provider. +// If none is registered then an instance of NoopTracerProvider is returned. 
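Editor's note: a hedged wiring sketch (not part of the vendored file, and assuming the separate go.opentelemetry.io/otel/sdk/trace module, which this diff does not vendor). Applications typically register a concrete provider once at startup; after that, the global accessors defined here return it:

    package example

    import (
        "go.opentelemetry.io/otel"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    // initTracing installs an SDK provider as the process-global default.
    func initTracing() *sdktrace.TracerProvider {
        tp := sdktrace.NewTracerProvider() // exporter/sampler options omitted
        otel.SetTracerProvider(tp)         // otel.Tracer(...) now uses tp
        return tp
    }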
+// +// Use the trace provider to create a named tracer. E.g. +// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// or +// +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. +func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 00000000..cb3efbb9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,333 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. +type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. 
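Editor's note: a short sketch (not part of the vendored file) of how these config types are consumed. A Tracer implementation folds the caller's variadic options into a SpanConfig via NewSpanStartConfig, defined below; resolveStartOptions is a hypothetical helper name:

    package example

    import (
        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/trace"
    )

    // resolveStartOptions mimics the first step an SDK tracer takes in Start.
    func resolveStartOptions(opts ...trace.SpanStartOption) (trace.SpanKind, []attribute.KeyValue) {
        cfg := trace.NewSpanStartConfig(opts...) // apply every option in order
        return cfg.SpanKind(), cfg.Attributes()
    }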
+type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. +func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. +type SpanEndOption interface { + applySpanEnd(SpanConfig) SpanConfig +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace +} + +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp +// set to the call time, otherwise no validation is performed on the returned +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig + for _, option := range options { + c = option.applyEvent(c) + } + if c.timestamp.IsZero() { + c.timestamp = time.Now() + } + return c +} + +// EventOption applies span event options to an EventConfig. 
+type EventOption interface { + applyEvent(EventConfig) EventConfig +} + +// SpanOption are options that can be used at both the beginning and end of a span. +type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption + EventOption +} + +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} + +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c SpanConfig) SpanConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} +func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o attributeOption) applyEvent(c EventConfig) EventConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} + +var _ SpanStartEventOption = attributeOption{} + +// WithAttributes adds the attributes related to a span life-cycle event. +// These attributes are used to describe the work a Span represents when this +// option is provided to a Span's start or end events. Otherwise, these +// attributes provide additional information about the event being recorded +// (e.g. error, state change, processing progress, system event). +// +// If multiple of these options are passed the attributes of each successive +// option will extend the attributes instead of overwriting. There is no +// guarantee of uniqueness in the resulting attributes. +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) +} + +// SpanEventOption are options that can be used with an event or a span. +type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). +func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. 
Any +// existing parent span context will be ignored when defining the Span's trace +// identifiers. +func WithNewRoot() SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.newRoot = true + return cfg + }) +} + +// WithSpanKind sets the SpanKind of a Span. +func WithSpanKind(kind SpanKind) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.spanKind = kind + return cfg + }) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.instrumentationVersion = version + return cfg + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL for the Tracer. +func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.schemaURL = schemaURL + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go new file mode 100644 index 00000000..76f9a083 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import "context" + +type traceContextKeyType int + +const currentSpanKey traceContextKeyType = iota + +// ContextWithSpan returns a copy of parent with span set as the current Span. +func ContextWithSpan(parent context.Context, span Span) context.Context { + return context.WithValue(parent, currentSpanKey, span) +} + +// ContextWithSpanContext returns a copy of parent with sc as the current +// Span. The Span implementation that wraps sc is non-recording and performs +// no operations other than to return sc as the SpanContext from the +// SpanContext method. +func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { + return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) +} + +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly +// as a remote SpanContext and as the current Span. The Span implementation +// that wraps rsc is non-recording and performs no operations other than to +// return rsc as the SpanContext from the SpanContext method. +func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { + return ContextWithSpanContext(parent, rsc.WithRemote(true)) +} + +// SpanFromContext returns the current Span from ctx. +// +// If no Span is currently set in ctx, an implementation of a Span that +// performs no operations is returned.
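Editor's note: a minimal caller-side sketch (not part of the vendored file). Because of the no-op fallback described above, the returned Span can be used unconditionally, whether or not tracing is configured:

    package example

    import (
        "context"

        "go.opentelemetry.io/otel/trace"
    )

    func handle(ctx context.Context) {
        span := trace.SpanFromContext(ctx)
        span.AddEvent("request handled") // a no-op when no Span is set in ctx
    }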
+func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpan{} + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpan{} +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 00000000..ab0346f9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.TracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } +*/ +package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 00000000..88fcb816 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 00000000..7cf6c7f3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +// NewNoopTracerProvider returns an implementation of TracerProvider that +// performs no operations. The Tracer and Spans created from the returned +// TracerProvider also perform no operations. +func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} +} + +type noopTracerProvider struct{} + +var _ TracerProvider = noopTracerProvider{} + +// Tracer returns noop implementation of Tracer. +func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} +} + +// noopTracer is an implementation of Tracer that performs no operations. +type noopTracer struct{} + +var _ Tracer = noopTracer{} + +// Start carries forward a non-recording Span, if one is present in the context, otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpan{} + } + return ContextWithSpan(ctx, span), span +} + +// noopSpan is an implementation of Span that performs no operations. +type noopSpan struct{} + +var _ Span = noopSpan{} + +// SpanContext returns an empty span context. +func (noopSpan) SpanContext() SpanContext { return SpanContext{} } + +// IsRecording always returns false. +func (noopSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (noopSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (noopSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (noopSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (noopSpan) End(...SpanEndOption) {} + +// RecordError does nothing. +func (noopSpan) RecordError(error, ...EventOption) {} + +// AddEvent does nothing. +func (noopSpan) AddEvent(string, ...EventOption) {} + +// SetName does nothing. +func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider. 
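+// Spans started from Tracers it provides are likewise no-ops.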
+func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go new file mode 100644 index 00000000..4aa94f79 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -0,0 +1,551 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +const ( + // FlagsSampled is a bitmask with the sampled bit set. A SpanContext + // with the sampling bit set means the span is sampled. + FlagsSampled = TraceFlags(0x01) + + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" + + errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equal to 32" + errNilTraceID errorConst = "trace-id can't be all zero" + + errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equal to 16" + errNilSpanID errorConst = "span-id can't be all zero" +) + +type errorConst string + +func (e errorConst) Error() string { + return string(e) } + +// TraceID is a unique identity of a trace. +// nolint:revive // revive complains about stutter of `trace.TraceID`. +type TraceID [16]byte + +var nilTraceID TraceID +var _ json.Marshaler = nilTraceID + +// IsValid checks whether the TraceID is valid. A valid trace ID does +// not consist of zeros only. +func (t TraceID) IsValid() bool { + return !bytes.Equal(t[:], nilTraceID[:]) +} + +// MarshalJSON implements a custom marshal function to encode TraceID +// as a hex string. +func (t TraceID) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String returns the hex string representation form of a TraceID. +func (t TraceID) String() string { + return hex.EncodeToString(t[:]) +} + +// SpanID is a unique identity of a span in a trace. +type SpanID [8]byte + +var nilSpanID SpanID +var _ json.Marshaler = nilSpanID + +// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// of zeros only. +func (s SpanID) IsValid() bool { + return !bytes.Equal(s[:], nilSpanID[:]) +} + +// MarshalJSON implements a custom marshal function to encode SpanID +// as a hex string. +func (s SpanID) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// String returns the hex string representation form of a SpanID. +func (s SpanID) String() string { + return hex.EncodeToString(s[:]) +} + +// TraceIDFromHex returns a TraceID from a hex string if it is compliant with +// the W3C trace-context specification. See more at +// https://www.w3.org/TR/trace-context/#trace-id +// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
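+// For example, TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736") parses the trace-id used in that specification's examples.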
+func TraceIDFromHex(h string) (TraceID, error) { + t := TraceID{} + if len(h) != 32 { + return t, errInvalidTraceIDLength + } + + if err := decodeHex(h, t[:]); err != nil { + return t, err + } + + if !t.IsValid() { + return t, errNilTraceID + } + return t, nil +} + +// SpanIDFromHex returns a SpanID from a hex string if it is compliant +// with the w3c trace-context specification. +// See more at https://www.w3.org/TR/trace-context/#parent-id +func SpanIDFromHex(h string) (SpanID, error) { + s := SpanID{} + if len(h) != 16 { + return s, errInvalidSpanIDLength + } + + if err := decodeHex(h, s[:]); err != nil { + return s, err + } + + if !s.IsValid() { + return s, errNilSpanID + } + return s, nil +} + +func decodeHex(h string, b []byte) error { + for _, r := range h { + switch { + case 'a' <= r && r <= 'f': + continue + case '0' <= r && r <= '9': + continue + default: + return errInvalidHexID + } + } + + decoded, err := hex.DecodeString(h) + if err != nil { + return err + } + + copy(b, decoded) + return nil +} + +// TraceFlags contains flags that can be set on a SpanContext. +type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. + +// IsSampled returns if the sampling bit is set in the TraceFlags. +func (tf TraceFlags) IsSampled() bool { + return tf&FlagsSampled == FlagsSampled +} + +// WithSampled sets the sampling bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. + if sampled { + return tf | FlagsSampled + } + + return tf &^ FlagsSampled +} + +// MarshalJSON implements a custom marshal function to encode TraceFlags +// as a hex string. +func (tf TraceFlags) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.String()) +} + +// String returns the hex string representation form of TraceFlags. +func (tf TraceFlags) String() string { + return hex.EncodeToString([]byte{byte(tf)}[:]) +} + +// SpanContextConfig contains mutable fields usable for constructing +// an immutable SpanContext. +type SpanContextConfig struct { + TraceID TraceID + SpanID SpanID + TraceFlags TraceFlags + TraceState TraceState + Remote bool +} + +// NewSpanContext constructs a SpanContext using values from the provided +// SpanContextConfig. +func NewSpanContext(config SpanContextConfig) SpanContext { + return SpanContext{ + traceID: config.TraceID, + spanID: config.SpanID, + traceFlags: config.TraceFlags, + traceState: config.TraceState, + remote: config.Remote, + } +} + +// SpanContext contains identifying trace information about a Span. +type SpanContext struct { + traceID TraceID + spanID SpanID + traceFlags TraceFlags + traceState TraceState + remote bool +} + +var _ json.Marshaler = SpanContext{} + +// IsValid returns if the SpanContext is valid. A valid span context has a +// valid TraceID and SpanID. +func (sc SpanContext) IsValid() bool { + return sc.HasTraceID() && sc.HasSpanID() +} + +// IsRemote indicates whether the SpanContext represents a remotely-created Span. +func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. 
+func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + +// HasTraceID checks if the SpanContext has a valid TraceID. +func (sc SpanContext) HasTraceID() bool { + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. +func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID +} + +// HasSpanID checks if the SpanContext has a valid SpanID. +func (sc SpanContext) HasSpanID() bool { + return sc.spanID.IsValid() +} + +// WithSpanID returns a new SpanContext with the SpanID replaced. +func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags +} + +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsSampled() bool { + return sc.traceFlags.IsSampled() +} + +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState +} + +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, + } +} + +// Equal is a predicate that determines whether two SpanContext values are equal. +func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote +} + +// MarshalJSON implements a custom marshal function to encode a SpanContext. +func (sc SpanContext) MarshalJSON() ([]byte, error) { + return json.Marshal(SpanContextConfig{ + TraceID: sc.traceID, + SpanID: sc.spanID, + TraceFlags: sc.traceFlags, + TraceState: sc.traceState, + Remote: sc.remote, + }) +} + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: methods may be added to this interface in minor releases. +type Span interface { + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // IsRecording returns the recording state of the Span. 
It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application.
+ SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of a client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker, which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} + +// Tracer is the creator of Spans. +// +// Warning: methods may be added to this interface in minor releases. +type Tracer interface { + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides; it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation; rather, they should just use the +// provided Tracers to instrument code.
+// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: methods may be added to this interface in minor releases. +type TracerProvider interface { + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithInstrumentationAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation-defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go new file mode 100644 index 00000000..ca68a82e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -0,0 +1,212 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" +) + +const ( + maxListMembers = 32 + + listDelimiter = "," + + // based on the W3C Trace Context specification, see + // https://www.w3.org/TR/trace-context-1/#tracestate-header + noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + + errInvalidKey errorConst = "invalid tracestate key" + errInvalidValue errorConst = "invalid tracestate value" + errInvalidMember errorConst = "invalid tracestate list-member" + errMemberNumber errorConst = "too many list-members in tracestate" + errDuplicate errorConst = "duplicate list-member in tracestate" +) + +var ( + keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) + valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) + memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) +) + +type member struct { + Key string + Value string +} + +func newMember(key, value string) (member, error) { + if !keyRe.MatchString(key) { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) + } + return member{Key: key, Value: value}, nil +} + +func parseMember(m string) (member, error) { + matches := memberRe.FindStringSubmatch(m) + if len(matches) != 5 { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + + return member{ + Key: matches[1], + Value: matches[4], + }, nil +} + +// String encodes member into a string compliant with the W3C Trace Context +// specification. +func (m member) String() string { + return fmt.Sprintf("%s=%s", m.Key, m.Value) +} + +// TraceState provides additional vendor-specific trace identification +// information across different distributed tracing systems. It represents an +// immutable list consisting of key/value pairs; each pair is referred to as a +// list-member. +// +// TraceState conforms to the W3C Trace Context specification +// (https://www.w3.org/TR/trace-context-1). All operations that create or copy +// a TraceState do so by validating all input and will only produce TraceState +// that conform to the specification. Specifically, this means that all +// list-member's key/value pairs are valid, no duplicate list-members exist, +// and the maximum number of list-members (32) is not exceeded. +type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` + // list is the members in order. + list []member +} + +var _ json.Marshaler = TraceState{} + +// ParseTraceState attempts to decode a TraceState from the passed +// string. It returns an error if the input is invalid according to the W3C +// Trace Context specification.
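+// For example, ParseTraceState("rojo=00f067aa0ba902b7,congo=t61rcWkgMzE") returns a TraceState with two list-members; this is the tracestate value used in the W3C specification's examples.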
+func ParseTraceState(tracestate string) (TraceState, error) { + if tracestate == "" { + return TraceState{}, nil + } + + wrapErr := func(err error) error { + return fmt.Errorf("failed to parse tracestate: %w", err) + } + + var members []member + found := make(map[string]struct{}) + for _, memberStr := range strings.Split(tracestate, listDelimiter) { + if len(memberStr) == 0 { + continue + } + + m, err := parseMember(memberStr) + if err != nil { + return TraceState{}, wrapErr(err) + } + + if _, ok := found[m.Key]; ok { + return TraceState{}, wrapErr(errDuplicate) + } + found[m.Key] = struct{}{} + + members = append(members, m) + if n := len(members); n > maxListMembers { + return TraceState{}, wrapErr(errMemberNumber) + } + } + + return TraceState{list: members}, nil +} + +// MarshalJSON marshals the TraceState into JSON. +func (ts TraceState) MarshalJSON() ([]byte, error) { + return json.Marshal(ts.String()) +} + +// String encodes the TraceState into a string compliant with the W3C +// Trace Context specification. The returned string will be invalid if the +// TraceState contains any invalid members. +func (ts TraceState) String() string { + members := make([]string, len(ts.list)) + for i, m := range ts.list { + members[i] = m.String() + } + return strings.Join(members, listDelimiter) +} + +// Get returns the value paired with key from the corresponding TraceState +// list-member if it exists, otherwise an empty string is returned. +func (ts TraceState) Get(key string) string { + for _, member := range ts.list { + if member.Key == key { + return member.Value + } + } + + return "" +} + +// Insert adds a new list-member defined by the key/value pair to the +// TraceState. If a list-member already exists for the given key, that +// list-member's value is updated. The new or updated list-member is always +// moved to the beginning of the TraceState as specified by the W3C Trace +// Context specification. +// +// If key or value are invalid according to the W3C Trace Context +// specification an error is returned with the original TraceState. +// +// If adding a new list-member means the TraceState would have more members +// than is allowed, the new list-member will be inserted and the right-most +// list-member will be dropped in the returned TraceState. +func (ts TraceState) Insert(key, value string) (TraceState, error) { + m, err := newMember(key, value) + if err != nil { + return ts, err + } + + cTS := ts.Delete(key) + if cTS.Len()+1 <= maxListMembers { + cTS.list = append(cTS.list, member{}) + } + // When the number of members exceeds capacity, drop the "right-most". + copy(cTS.list[1:], cTS.list) + cTS.list[0] = m + + return cTS, nil +} + +// Delete returns a copy of the TraceState with the list-member identified by +// key removed. +func (ts TraceState) Delete(key string) TraceState { + members := make([]member, ts.Len()) + copy(members, ts.list) + for i, member := range ts.list { + if member.Key == key { + members = append(members[:i], members[i+1:]...) + // TraceState should contain no duplicate members. + break + } + } + return TraceState{list: members} +} + +// Len returns the number of list-members in the TraceState.
+func (ts TraceState) Len() int { + return len(ts.list) +} diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh new file mode 100644 index 00000000..dbb61a42 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +cd $(dirname $0) +TOOLS_DIR=$(pwd)/.tools + +if [ -z "${GOPATH}" ] ; then + printf "GOPATH is not defined.\n" + exit -1 +fi + +if [ ! -d "${GOPATH}" ] ; then + printf "GOPATH ${GOPATH} is invalid \n" + exit -1 +fi + +# Pre-requisites +if ! git diff --quiet; then \ + git status + printf "\n\nError: working tree is not clean\n" + exit -1 +fi + +if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then + printf "$(git log -1)" + printf "\n\nError: HEAD is not pointing to a tagged version" +fi + +make ${TOOLS_DIR}/gojq + +DIR_TMP="${GOPATH}/src/oteltmp/" +rm -rf $DIR_TMP +mkdir -p $DIR_TMP + +printf "Copy examples to ${DIR_TMP}\n" +cp -a ./example ${DIR_TMP} + +# Update go.mod files +printf "Update go.mod: rename module and remove replace\n" + +PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) + +for dir in $PACKAGE_DIRS; do + printf " Update go.mod for $dir\n" + (cd "${DIR_TMP}/${dir}" && \ + # replaces is ("mod1" "mod2" …) + replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ + # strip double quotes + replaces=("${replaces[@]%\"}") && \ + replaces=("${replaces[@]#\"}") && \ + # make an array (-dropreplace=mod1 -dropreplace=mod2 …) + dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ + go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ + go mod tidy) +done +printf "Update done:\n\n" + +# Build directories that contain main package. These directories are different than +# directories that contain go.mod files. +printf "Build examples:\n" +EXAMPLES=$(./get_main_pkgs.sh ./example) +for ex in $EXAMPLES; do + printf " Build $ex in ${DIR_TMP}/${ex}\n" + (cd "${DIR_TMP}/${ex}" && \ + go build .) +done + +# Cleanup +printf "Remove copied files.\n" +rm -rf $DIR_TMP diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/go.opentelemetry.io/otel/version.go similarity index 67% rename from vendor/github.com/go-openapi/swag/pre_go18.go rename to vendor/go.opentelemetry.io/otel/version.go index 2757d9b9..ad64e199 100644 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -1,10 +1,10 @@ -// Copyright 2015 go-swagger maintainers +// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,13 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !go1.8 -// +build !go1.8 +package otel // import "go.opentelemetry.io/otel" -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) +// Version is the current release version of OpenTelemetry in use. +func Version() string { + return "1.19.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 00000000..7d212769 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,55 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module-sets: + stable-v1: + version: v1.19.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/dice + - go.opentelemetry.io/otel/example/fib + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk + - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/trace + experimental-metrics: + version: v0.42.0 + modules: + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/example/view + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/prometheus + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + experimental-schema: + version: v0.0.7 + modules: + - go.opentelemetry.io/otel/schema +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5dee..b3c1699b 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, 
r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go index 4269ed11..bf225953 100644 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -279,21 +279,22 @@ func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { // This is the exposed reflection of the internal OCSP structures. -// The status values that can be expressed in OCSP. See RFC 6960. +// The status values that can be expressed in OCSP. See RFC 6960. +// These are used for the Response.Status field. const ( // Good means that the certificate is valid. - Good = iota + Good = 0 // Revoked means that the certificate has been deliberately revoked. - Revoked + Revoked = 1 // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown + Unknown = 2 // ServerFailed is unused and was never used (see // https://go-review.googlesource.com/#/c/18944). ParseResponse will // return a ResponseError when an error response is parsed. - ServerFailed + ServerFailed = 3 ) -// The enumerated reasons for revoking a certificate. See RFC 5280. +// The enumerated reasons for revoking a certificate. See RFC 5280. const ( Unspecified = 0 KeyCompromise = 1 diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 5e8158bb..46ceac34 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -209,25 +209,37 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { return s } +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } +} + // Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check + _ = s[i:j:len(s)] // bounds check - return append(s[:i], s[j:]...) 
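+	// An empty range (i == j) deletes nothing; return s unchanged.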
+ if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s } // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. -// When DeleteFunc removes m elements, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage -// collected. +// DeleteFunc zeroes the elements between the new length and the original length. func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i := IndexFunc(s, del) if i == -1 { @@ -240,11 +252,13 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { _ = s[i:j] // verify that i:j is a valid subslice @@ -272,6 +286,7 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { if i+len(v) != j { copy(r[i+len(v):], s[j:]) } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC return r } @@ -345,9 +360,7 @@ func Clone[S ~[]E, E any](s S) S { // This is like the uniq command found on Unix. // Compact modifies the contents of the slice s and returns the modified slice, // which may have a smaller length. -// When Compact discards m elements in total, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage collected. +// Compact zeroes the elements between the new length and the original length. func Compact[S ~[]E, E comparable](s S) S { if len(s) < 2 { return s @@ -361,11 +374,13 @@ func Compact[S ~[]E, E comparable](s S) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // CompactFunc is like [Compact] but uses an equality function to compare elements. // For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. 
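+// For example, CompactFunc([]string{"a", "A", "b"}, strings.EqualFold) returns []string{"a", "b"}.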
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { if len(s) < 2 { return s @@ -379,6 +394,7 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index de67f938..3c57880d 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() { return } switch c { - case ' ', '\n', '\r', '\t', '\f', '/': - z.pendingAttr[0].end = z.raw.end - 1 - return case '=': if z.pendingAttr[0].start+1 == z.raw.end { // WHATWG 13.2.5.32, if we see an equals sign before the attribute name @@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() { continue } fallthrough - case '>': + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // WHATWG 13.2.5.33 Attribute name state + // We need to reconsume the char in the after attribute name state to support the / character z.raw.end-- z.pendingAttr[0].end = z.raw.end return @@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() { if z.err != nil { return } + if c == '/' { + // WHATWG 13.2.5.34 After attribute name state + // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. + return + } if c != '=' { z.raw.end-- return diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90d..e2b298d8 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86..c2a5b44b 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -2911,6 +2911,15 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } cc.cond.Broadcast() diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4b..b0e41985 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c6492020..fdcaa974 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -584,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc6993..2f0fa76e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4d..2b57e0f7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. 
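+	// (ENOMEM here indicates the value was truncated to fit the fixed-size buffer; the truncated string is still usable.)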
+ if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e..5682e262 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// path is a non-empty path for the specified key. +// atfd is a file descriptor at which to start lookup from or AT_FDCWD.
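+// A hypothetical example, assuming a filesystem whose "source" parameter is path-typed: FsconfigSetPath(fsfd, "source", "/dev/sda1", AT_FDCWD).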
+func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FsconfigSetPath but with AT_EMPTY_PATH implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is a file descriptor to be assigned to the specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon. +func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index a5d3ff8d..36bf8399 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2465,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2669,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2913,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!'
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3075,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821c..42ff8c3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e411..dca43600 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c6398556..5cca668a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e2..d8cae6d1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 27ac4a09..28e39afd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 
SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642..cd66e92c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d7..c1595eba 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0..ee9456b0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84..8cfca81e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c54f152d..60b0deb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc7..f90aa728 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e0c3725e..ba9e0150 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813e..07cdfd6e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4e..2f1dd214 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994d..f40519d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d271..87d8612a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbd..0cc3ce49 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f56dc250..856d92d6 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 974bf246..8d467094 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e..edc17324 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77..445eba20 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362e..adba01bc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4f..014c4e9c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca8..ccc97d74 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index a72e31d3..ec2b64a9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + 
SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e374..21a839e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c..c11121ec 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e59..909b631f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195..e49bed16 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943b..66017d2d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc5..47bab18d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399f..eff6bcde 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - 
Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -832,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1546,6 +1559,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1561,6 +1575,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1608,6 +1623,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1645,6 +1663,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 
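The three Total_rto* fields at the end of TCPInfo account for the bump from 0xf0 to 0xf8 in SizeofTCPInfo. A brief sketch of reading them back through the package's existing GetsockoptTCPInfo helper; fd is assumed to be the raw descriptor of an established TCP connection, and older kernels simply leave the new fields zero:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func printRTOStats(fd int) error {
	info, err := unix.GetsockoptTCPInfo(fd, unix.IPPROTO_TCP, unix.TCP_INFO)
	if err != nil {
		return err
	}
	// Total_rto, Total_rto_recoveries and Total_rto_time are new in this revision.
	fmt.Printf("rtt=%dus total_rto=%d recoveries=%d rto_time=%d\n",
		info.Rtt, info.Total_rto, info.Total_rto_recoveries, info.Total_rto_time)
	return nil
}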
+ IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1666,6 +1692,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1689,9 +1718,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1722,6 +1764,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1736,6 +1780,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1748,6 +1793,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1777,6 +1824,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1792,6 +1842,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1850,8 +1901,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1881,6 +1940,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( @@ -3399,7 +3463,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4247,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type 
LandlockPathBeneathAttr struct { @@ -5134,7 +5199,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 +5612,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad1925..d4577a42 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ffb8708c..6395a031 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index b2a0b7c6..a8d7b06a 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -15,22 +15,10 @@ Load passes most patterns directly to the underlying build tool. The default build tool is the go command. Its supported patterns are described at https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. -Load may be used in Go projects that use alternative build systems, by -installing an appropriate "driver" program for the build system and -specifying its location in the GOPACKAGESDRIVER environment variable. -For example, -https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration -explains how to use the driver for Bazel. -The driver program is responsible for interpreting patterns in its -preferred notation and reporting information about the packages that -they identify. -(See driverRequest and driverResponse types for the JSON -schema used by the protocol. -Though the protocol is supported, these types are currently unexported; -see #64608 for a proposal to publish them.) 
- -Regardless of driver, all patterns with the prefix "query=", where query is a +All patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -86,7 +74,29 @@ for details. Most tools should pass their command-line arguments (after any flags) uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. + See the Example function for typical usage. + +# The driver protocol + +[Load] may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7db1d129..4335c1eb 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file enables an external tool to intercept package requests. -// If the tool is present then its results are used in preference to -// the go list command. - package packages +// This file defines the protocol that enables an external "driver" +// tool to supply package metadata in place of 'go list'. + import ( "bytes" "encoding/json" @@ -17,31 +16,71 @@ import ( "strings" ) -// The Driver Protocol +// DriverRequest defines the schema of a request for package metadata +// from an external driver program. The JSON-encoded DriverRequest +// message is provided to the driver program's standard input. The +// query patterns are provided as command-line arguments. // -// The driver, given the inputs to a call to Load, returns metadata about the packages specified. -// This allows for different build systems to support go/packages by telling go/packages how the -// packages' source is organized. -// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in -// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package -// documentation in doc.go for the full description of the patterns that need to be supported. -// A driver receives as a JSON-serialized driverRequest struct in standard input and will -// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. - -// driverRequest is used to provide the portion of Load's Config that is needed by a driver. -type driverRequest struct { +// See the package documentation for an overview. 
+type DriverRequest struct {
 	Mode LoadMode `json:"mode"`
+
+	// Env specifies the environment the underlying build system should be run in.
 	Env []string `json:"env"`
+
+	// BuildFlags are flags that should be passed to the underlying build system.
 	BuildFlags []string `json:"build_flags"`
+
+	// Tests specifies whether the patterns should also return test packages.
 	Tests bool `json:"tests"`
+
+	// Overlay maps file paths (relative to the driver's working directory) to the byte contents
+	// of overlay files.
 	Overlay map[string][]byte `json:"overlay"`
 }
+
+// DriverResponse defines the schema of a response from an external
+// driver program, providing the results of a query for package
+// metadata. The driver program must write a JSON-encoded
+// DriverResponse message to its standard output.
+//
+// See the package documentation for an overview.
+type DriverResponse struct {
+	// NotHandled is returned if the request can't be handled by the current
+	// driver. If an external driver returns a response with NotHandled, the
+	// rest of the DriverResponse is ignored, and go/packages will fall back
+	// to the next driver. If go/packages is extended in the future to support
+	// lists of multiple drivers, go/packages will fall back to the next driver.
+	NotHandled bool
+
+	// Compiler and Arch are the arguments passed to types.SizesFor
+	// to get a types.Sizes to use when type checking.
+	Compiler string
+	Arch     string
+
+	// Roots is the set of package IDs that make up the root packages.
+	// We have to encode this separately because when we encode a single package
+	// we cannot know if it is one of the roots as that requires knowledge of the
+	// graph it is part of.
+	Roots []string `json:",omitempty"`
+
+	// Packages is the full set of packages in the graph.
+	// The packages are not connected into a graph.
+	// The Imports, if populated, will be stubs that only have their ID set.
+	// Imports will be connected and then type and syntax information added in a
+	// later pass (see refine).
+	Packages []*Package
+
+	// GoVersion is the minor version number used by the driver
+	// (e.g. the go command on the PATH) when selecting .go files.
+	// Zero means unknown.
+	GoVersion int
+}
+
+// driver is the type for functions that query the build system for the
+// packages named by the patterns.
+type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)
+
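Since DriverRequest and DriverResponse are exported as of this change, the smallest conceivable GOPACKAGESDRIVER binary can now be written against the package directly. The sketch below (an illustration, not part of the diff) declines every query, causing go/packages to fall back to 'go list':

package main

import (
	"encoding/json"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Query patterns arrive in os.Args[1:]; the DriverRequest arrives on stdin.
	var req packages.DriverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		os.Exit(1)
	}
	// NotHandled tells go/packages to ignore the rest of the response
	// and fall back to the 'go list' driver.
	json.NewEncoder(os.Stdout).Encode(&packages.DriverResponse{NotHandled: true})
}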
 // findExternalDriver returns the file path of a tool that supplies
 // the build system package structure, or "" if not found.
 // If GOPACKAGESDRIVER is set in the environment findExternalDriver returns its
@@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver {
 			return nil
 		}
 	}
-	return func(cfg *Config, words ...string) (*driverResponse, error) {
-		req, err := json.Marshal(driverRequest{
+	return func(cfg *Config, words ...string) (*DriverResponse, error) {
+		req, err := json.Marshal(DriverRequest{
 			Mode:       cfg.Mode,
 			Env:        cfg.Env,
 			BuildFlags: cfg.BuildFlags,
@@ -92,7 +131,7 @@ func findExternalDriver(cfg *Config) driver {
 			fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
 		}
-		var response driverResponse
+		var response DriverResponse
 		if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
 			return nil, err
 		}
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index cd375fbc..22305d9c 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -35,23 +35,23 @@ type goTooOldError struct {
 	error
 }
-// responseDeduper wraps a driverResponse, deduplicating its contents.
+// responseDeduper wraps a DriverResponse, deduplicating its contents.
 type responseDeduper struct {
 	seenRoots    map[string]bool
 	seenPackages map[string]*Package
-	dr           *driverResponse
+	dr           *DriverResponse
 }
 func newDeduper() *responseDeduper {
 	return &responseDeduper{
-		dr:           &driverResponse{},
+		dr:           &DriverResponse{},
 		seenRoots:    map[string]bool{},
 		seenPackages: map[string]*Package{},
 	}
 }
-// addAll fills in r with a driverResponse.
-func (r *responseDeduper) addAll(dr *driverResponse) {
+// addAll fills in r with a DriverResponse.
+func (r *responseDeduper) addAll(dr *DriverResponse) {
 	for _, pkg := range dr.Packages {
 		r.addPackage(pkg)
 	}
@@ -128,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string {
 // goListDriver uses the go list command to interpret the patterns and produce
 // the build system package structure.
 // See driver for more details.
-func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
+func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
 	// Make sure that any asynchronous go commands are killed when we return.
 	parentCtx := cfg.Context
 	if parentCtx == nil {
@@ -146,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
 	}
 	// Fill in response.Sizes asynchronously if necessary.
-	var sizeserr error
-	var sizeswg sync.WaitGroup
 	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
-		sizeswg.Add(1)
+		errCh := make(chan error)
 		go func() {
 			compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner)
-			sizeserr = err
 			response.dr.Compiler = compiler
 			response.dr.Arch = arch
-			sizeswg.Done()
+			errCh <- err
+		}()
+		defer func() {
+			if sizesErr := <-errCh; sizesErr != nil {
+				err = sizesErr
+			}
 		}()
 	}
@@ -208,10 +210,7 @@ extractQueries:
 		}
 	}
-	sizeswg.Wait()
-	if sizeserr != nil {
-		return nil, sizeserr
-	}
+	// (We may yet return an error due to defer.)
 	return response.dr, nil
 }
@@ -266,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries
 // adhocPackage attempts to load or construct an ad-hoc package for a given
 // query, if the original call to the driver produced inadequate results.
-func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { +func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { response, err := state.createDriverResponse(query) if err != nil { return nil, err @@ -357,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string { // createDriverResponse uses the "go list" command to expand the pattern // words and return a response for the specified packages. -func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { +func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -384,7 +383,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkgs := make(map[string]*Package) additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. - response := &driverResponse{ + response := &DriverResponse{ GoVersion: goVersion, } for dec := json.NewDecoder(buf); dec.More(); { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 81e9e6a7..f33b0afc 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -206,43 +206,6 @@ type Config struct { Overlay map[string][]byte } -// driver is the type for functions that query the build system for the -// packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*driverResponse, error) - -// driverResponse contains the results for a driver query. -type driverResponse struct { - // NotHandled is returned if the request can't be handled by the current - // driver. If an external driver returns a response with NotHandled, the - // rest of the driverResponse is ignored, and go/packages will fallback - // to the next driver. If go/packages is extended in the future to support - // lists of multiple drivers, go/packages will fall back to the next driver. - NotHandled bool - - // Compiler and Arch are the arguments pass of types.SizesFor - // to get a types.Sizes to use when type checking. - Compiler string - Arch string - - // Roots is the set of package IDs that make up the root packages. - // We have to encode this separately because when we encode a single package - // we cannot know if it is one of the roots as that requires knowledge of the - // graph it is part of. - Roots []string `json:",omitempty"` - - // Packages is the full set of packages in the graph. - // The packages are not connected into a graph. - // The Imports if populated will be stubs that only have their ID set. - // Imports will be connected and then type and syntax information added in a - // later pass (see refine). - Packages []*Package - - // GoVersion is the minor version number used by the driver - // (e.g. the go command on the PATH) when selecting .go files. - // Zero means unknown. - GoVersion int -} - // Load loads and returns the Go packages named by the given patterns. // // Config specifies loading options; @@ -291,7 +254,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. // The boolean result indicates that an external driver handled the request. 
-func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) {
+func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) {
 	if driver := findExternalDriver(cfg); driver != nil {
 		response, err := driver(cfg, patterns...)
 		if err != nil {
@@ -303,7 +266,10 @@ func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, erro
 	}
 	response, err := goListDriver(cfg, patterns...)
-	return response, false, err
+	if err != nil {
+		return nil, false, err
+	}
+	return response, false, nil
 }
 // A Package describes a loaded Go package.
@@ -648,7 +614,7 @@ func newLoader(cfg *Config) *loader {
 // refine connects the supplied packages into a graph and then adds type
 // and syntax information as requested by the LoadMode.
-func (ld *loader) refine(response *driverResponse) ([]*Package, error) {
+func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 	roots := response.Roots
 	rootMap := make(map[string]int, len(roots))
 	for i, root := range roots {
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 9bde15e3..9fffa9ad 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -224,6 +224,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte
 	// Gather the relevant packages from the manifest.
 	items := make([]GetPackagesItem, r.uint64())
+	uniquePkgPaths := make(map[string]bool)
 	for i := range items {
 		pkgPathOff := r.uint64()
 		pkgPath := p.stringAt(pkgPathOff)
@@ -248,6 +249,12 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte
 		}
 		items[i].nameIndex = nameIndex
+
+		uniquePkgPaths[pkgPath] = true
+	}
+	// Debugging #63822; hypothesis: there are duplicate PkgPaths.
+	if len(uniquePkgPaths) != len(items) {
+		reportf("found duplicate PkgPaths while reading export data manifest: %v", items)
 	}
 	// Request packages all at once from the client,
diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go121.go b/vendor/golang.org/x/tools/internal/versions/versions.go
similarity index 80%
rename from vendor/golang.org/x/tools/internal/versions/versions_go121.go
rename to vendor/golang.org/x/tools/internal/versions/versions.go
index cf4a7d03..e16f6c33 100644
--- a/vendor/golang.org/x/tools/internal/versions/versions_go121.go
+++ b/vendor/golang.org/x/tools/internal/versions/versions.go
@@ -2,11 +2,14 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
-//go:build !go1.22
-// +build !go1.22
-
 package versions
+// Note: If we use build tags to select go/version when go >= 1.22, we run
+// into go.dev/issue/53737: under some operations users would see an import
+// of "go/version" even if they would not compile the file, for example
+// during `go get -u ./...` (go.dev/issue/64490). For this reason, this
+// library is just a clone of go/version for the moment.
+
 // Lang returns the Go language version for version x.
 // If x is not a valid version, Lang returns the empty string.
 // For example:
diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go122.go b/vendor/golang.org/x/tools/internal/versions/versions_go122.go
deleted file mode 100644
index c1c1814b..00000000
--- a/vendor/golang.org/x/tools/internal/versions/versions_go122.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package versions - -import ( - "go/version" -) - -// Lang returns the Go language version for version x. -// If x is not a valid version, Lang returns the empty string. -// For example: -// -// Lang("go1.21rc2") = "go1.21" -// Lang("go1.21.2") = "go1.21" -// Lang("go1.21") = "go1.21" -// Lang("go1") = "go1" -// Lang("bad") = "" -// Lang("1.21") = "" -func Lang(x string) string { return version.Lang(x) } - -// Compare returns -1, 0, or +1 depending on whether -// x < y, x == y, or x > y, interpreted as Go versions. -// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". -// Invalid versions, including the empty string, compare less than -// valid versions and equal to each other. -// The language version "go1.21" compares less than the -// release candidate and eventual releases "go1.21rc1" and "go1.21.0". -// Custom toolchain suffixes are ignored during comparison: -// "go1.21.0" and "go1.21.0-bigcorp" are equal. -func Compare(x, y string) int { return version.Compare(x, y) } - -// IsValid reports whether the version x is valid. -func IsValid(x string) bool { return version.IsValid(x) } diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 1bc92248..ab0fbb79 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 712fef4d..52d530d7 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -121,9 +121,9 @@ func (a *Attributes) String() string { return sb.String() } -func str(x any) string { +func str(x any) (s string) { if v, ok := x.(fmt.Stringer); ok { - return v.String() + return fmt.Sprint(v) } else if v, ok := x.(string); ok { return v } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b6377f44..d79560a2 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. 
 func Register(b Builder) {
+	if strings.ToLower(b.Name()) != b.Name() {
+		// TODO: Skip the use of strings.ToLower() to index the map after v1.59
+		// is released to switch to case sensitive balancer registry. Also,
+		// remove this warning and update the docstrings for Register and Get.
+		logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name())
+	}
 	m[strings.ToLower(b.Name())] = b
 }
@@ -70,6 +79,12 @@ func init() {
 // Note that the compare is done in a case-insensitive fashion.
 // If no builder is registered with the name, nil will be returned.
 func Get(name string) Builder {
+	if strings.ToLower(name) != name {
+		// TODO: Skip the use of strings.ToLower() to index the map after v1.59
+		// is released to switch to case sensitive balancer registry. Also,
+		// remove this warning and update the docstrings for Register and Get.
+		logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name)
+	}
 	if b, ok := m[strings.ToLower(name)]; ok {
 		return b
 	}
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index ff7fea10..429c389e 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -337,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error {
 		return errConnClosing
 	}
 	if cc.idlenessState != ccIdlenessStateIdle {
-		cc.mu.Unlock()
 		channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState)
+		cc.mu.Unlock()
 		return nil
 	}
@@ -404,13 +404,13 @@
 // name resolver, load balancer and any subchannels.
 func (cc *ClientConn) enterIdleMode() error {
 	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
 	if cc.conns == nil {
-		cc.mu.Unlock()
 		return ErrClientConnClosing
 	}
 	if cc.idlenessState != ccIdlenessStateActive {
-		channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
-		cc.mu.Unlock()
+		channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
 		return nil
 	}
@@ -431,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error {
 	cc.balancerWrapper.enterIdleMode()
 	cc.csMgr.updateState(connectivity.Idle)
 	cc.idlenessState = ccIdlenessStateIdle
-	cc.mu.Unlock()
+	cc.addTraceEvent("entering idle mode")
 	go func() {
-		cc.addTraceEvent("entering idle mode")
 		for ac := range conns {
 			ac.tearDown(errConnIdling)
 		}
 	}()
+
 	return nil
 }
@@ -804,6 +804,12 @@ func init() {
 	internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
 		return cc.csMgr.pubSub.Subscribe(s)
 	}
+	internal.EnterIdleModeForTesting = func(cc *ClientConn) error {
+		return cc.enterIdleMode()
+	}
+	internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
+		return cc.exitIdleMode()
+	}
 }
 func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 1fd0d5c1..cfc9fd85 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -644,6 +644,7 @@ func defaultDialOptions() dialOptions {
 			UseProxy: true,
 		},
 		recvBufferPool: nopBufferPool{},
+		idleTimeout:    30 * time.Minute,
 	}
 }
@@ -680,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
 // channel will exit idle mode when the Connect() method is called or when an
 // RPC is initiated.
 //
-// By default this feature is disabled, which can also be explicitly configured
-// by passing zero to this function.
+// A default timeout of 30 minutes will be used if this dial option is not set
+// at dial time; idleness can be disabled by passing a timeout of zero.
 //
 // # Experimental
 //
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 69d5580b..5ebf88d7 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -38,6 +38,10 @@ const Identity = "identity"
 // Compressor is used for compressing and decompressing when sending or
 // receiving messages.
+//
+// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`,
+// gRPC will invoke it to determine the size of the buffer allocated for the
+// result of decompression. A return value of -1 indicates unknown size.
 type Compressor interface {
 	// Compress writes the data written to wc to w after compressing it. If an
 	// error occurs while initializing the compressor, that error is returned
@@ -51,15 +55,6 @@ type Compressor interface {
 	// coding header. The result must be static; the result cannot change
 	// between calls.
 	Name() string
-	// If a Compressor implements
-	// DecompressedSize(compressedBytes []byte) int, gRPC will call it
-	// to determine the size of the buffer allocated for the result of decompression.
-	// Return -1 to indicate unknown size.
-	//
-	// Experimental
-	//
-	// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-	// later release.
 }
 var registeredCompressor = make(map[string]Compressor)
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index 5fc0ee3d..fed1c011 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -23,6 +23,8 @@ package backoff
 import (
+	"context"
+	"errors"
 	"time"
 	grpcbackoff "google.golang.org/grpc/backoff"
@@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration {
 	}
 	return time.Duration(backoff)
 }
+
+// ErrResetBackoff is the error to be returned by the function executed by RunF,
+// to instruct the latter to reset its backoff state.
+var ErrResetBackoff = errors.New("reset backoff state")
+
+// RunF provides a convenient way to run a function f repeatedly until the
+// context expires or f returns a non-nil error that is not ErrResetBackoff.
+// When f returns ErrResetBackoff, RunF continues to run f, but resets its
+// backoff state before doing so. backoff accepts an integer representing the
+// number of retries, and returns the amount of time to back off.
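Before the implementation, a sketch of how a caller inside grpc-go might drive RunF (the package is internal, so only grpc-go itself can). doWork and errStateChanged are hypothetical placeholders; Exponential and grpcbackoff.DefaultConfig come from this file and its imports:

func pollUntilDone(ctx context.Context, doWork func() error) {
	bc := Exponential{Config: grpcbackoff.DefaultConfig}
	RunF(ctx, func() error {
		err := doWork()
		if errors.Is(err, errStateChanged) { // hypothetical sentinel: restart the schedule
			return ErrResetBackoff
		}
		return err // nil retries with growing delays; any other error stops RunF
	}, bc.Backoff)
}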
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) {
+	attempt := 0
+	timer := time.NewTimer(0)
+	for ctx.Err() == nil {
+		select {
+		case <-timer.C:
+		case <-ctx.Done():
+			timer.Stop()
+			return
+		}
+
+		err := f()
+		if errors.Is(err, ErrResetBackoff) {
+			timer.Reset(0)
+			attempt = 0
+			continue
+		}
+		if err != nil {
+			return
+		}
+		timer.Reset(backoff(attempt))
+		attempt++
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index c8a8c76d..0d94c63e 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -175,6 +175,12 @@ var (
 	// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
 	// metadata to RPCs.
 	GRPCResolverSchemeExtraMetadata string = "xds"
+
+	// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
+	EnterIdleModeForTesting any // func(*grpc.ClientConn) error
+
+	// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
+	ExitIdleModeForTesting any // func(*grpc.ClientConn) error
 )
 
 // HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 4cf85cad..03ef2fed 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -43,6 +43,34 @@ type Status struct {
 	s *spb.Status
 }
 
+// NewWithProto returns a new status including details from statusProto. This
+// is meant to be used by the gRPC library only.
+func NewWithProto(code codes.Code, message string, statusProto []string) *Status {
+	if len(statusProto) != 1 {
+		// No grpc-status-details bin header, or multiple; just ignore.
+		return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+	}
+	st := &spb.Status{}
+	if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil {
+		// Probably not a google.rpc.Status proto; do not provide details.
+		return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+	}
+	if st.Code == int32(code) {
+		// The codes match between the grpc-status header and the
+		// grpc-status-details-bin header; use the full details proto.
+		return &Status{s: st}
+	}
+	return &Status{
+		s: &spb.Status{
+			Code: int32(codes.Internal),
+			Message: fmt.Sprintf(
+				"grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v",
+				code, message, st,
+			),
+		},
+	}
+}
+
 // New returns a Status representing c and msg.
 func New(c codes.Code, msg string) *Status {
 	return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 98f80e3f..17f7a21b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
 			h.Set("Grpc-Message", encodeGrpcMessage(m))
 		}
 
+		s.hdrMu.Lock()
 		if p := st.Proto(); p != nil && len(p.Details) > 0 {
+			delete(s.trailer, grpcStatusDetailsBinHeader)
 			stBytes, err := proto.Marshal(p)
 			if err != nil {
 				// TODO: return error instead, when callers are able to handle it.
 				panic(err)
 			}
-			h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
+			h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes))
 		}
 
-		if md := s.Trailer(); len(md) > 0 {
-			for k, vv := range md {
+		if len(s.trailer) > 0 {
+			for k, vv := range s.trailer {
 				// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
 				if isReservedHeader(k) {
 					continue
@@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
 				}
 			}
 		}
+		s.hdrMu.Unlock()
 	})
 
 	if err == nil { // transport has not been closed
@@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
 }
 
 // writeCustomHeaders sets custom headers set on the stream via SetHeader
-// on the first write call (Write, WriteHeader, or WriteStatus).
+// on the first write call (Write, WriteHeader, or WriteStatus)
 func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
 	h := ht.rw.Header()
 
@@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
 	return err
 }
 
-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
 	// With this transport type there will be exactly 1 stream: this HTTP request.
 	ctx := ht.req.Context()
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index badab8ac..d6f5c493 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 		mdata          = make(map[string][]string)
 		contentTypeErr = "malformed header: missing HTTP content-type"
 		grpcMessage    string
-		statusGen      *status.Status
 		recvCompress   string
 		httpStatusCode *int
 		httpStatusErr  string
@@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 			rawStatusCode = codes.Code(uint32(code))
 		case "grpc-message":
 			grpcMessage = decodeGrpcMessage(hf.Value)
-		case "grpc-status-details-bin":
-			var err error
-			statusGen, err = decodeGRPCStatusDetails(hf.Value)
-			if err != nil {
-				headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
-			}
 		case ":status":
 			if hf.Value == "200" {
 				httpStatusErr = ""
@@ -1548,14 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 		return
 	}
 
-	if statusGen == nil {
-		statusGen = status.New(rawStatusCode, grpcMessage)
-	}
+	status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
 
 	// If client received END_STREAM from server while stream was still active,
 	// send RST_STREAM.
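// [Editor's note — annotation, not upstream code. With the
// "grpc-status-details-bin" switch case removed above, the header now arrives
// in mdata like any other metadata and is decoded in exactly one place by
// istatus.NewWithProto, which reports a code mismatch between grpc-status and
// the details proto as codes.Internal instead of silently trusting the
// details. Applications keep reading details off the RPC error as before; a
// hedged sketch, where err is the error returned by an RPC and errdetails is
// google.golang.org/genproto/googleapis/rpc/errdetails:]
//
//	for _, d := range status.Convert(err).Details() {
//		if br, ok := d.(*errdetails.BadRequest); ok {
//			log.Printf("server rejected request: %v", br.GetFieldViolations())
//		}
//	}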
 	rstStream := s.getState() == streamActive
-	t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true)
+	t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true)
 }
 
 // readServerPreface reads and handles the initial settings frame from the
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index c06db679..6fa1eb41 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -342,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 
 // operateHeaders takes action on the decoded headers. Returns an error if fatal
 // error encountered and transport needs to close, otherwise returns nil.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
 	// Acquire max stream ID lock for entire duration
 	t.maxStreamMu.Lock()
 	defer t.maxStreamMu.Unlock()
@@ -561,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 	}
 	if t.inTapHandle != nil {
 		var err error
-		if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
+		if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil {
 			t.mu.Unlock()
 			if t.logger.V(logLevel) {
 				t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
@@ -592,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 	s.requestRead = func(n int) {
 		t.adjustWindow(s, uint32(n))
 	}
-	s.ctx = traceCtx(s.ctx, s.method)
 	for _, sh := range t.stats {
 		s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
 		inHeader := &stats.InHeader{
@@ -630,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 // HandleStreams receives incoming streams using the given handler. This is
 // typically run in a separate goroutine.
 // traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (t *http2Server) HandleStreams(handle func(*Stream)) {
 	defer close(t.readerDone)
 	for {
 		t.controlBuf.throttle()
@@ -665,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
 		}
 		switch frame := frame.(type) {
 		case *http2.MetaHeadersFrame:
-			if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
+			if err := t.operateHeaders(frame, handle); err != nil {
 				t.Close(err)
 				break
 			}
@@ -1053,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
 
 	if p := st.Proto(); p != nil && len(p.Details) > 0 {
+		// Do not use the user's grpc-status-details-bin (if present) if we are
+		// even attempting to set our own.
+		delete(s.trailer, grpcStatusDetailsBinHeader)
 		stBytes, err := proto.Marshal(p)
 		if err != nil {
 			// TODO: return error instead, when callers are able to handle it.
 			t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
 		} else {
-			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+			headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)})
 		}
 	}
 
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 19581400..dc29d590 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -34,12 +34,9 @@ import (
 	"time"
 	"unicode/utf8"
 
-	"github.com/golang/protobuf/proto"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
-	spb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
 )
 
 const (
@@ -88,6 +85,8 @@ var (
 	}
 )
 
+var grpcStatusDetailsBinHeader = "grpc-status-details-bin"
+
 // isReservedHeader checks whether hdr belongs to HTTP2 headers
 // reserved by gRPC protocol. Any other headers are classified as the
 // user-specified metadata.
@@ -103,7 +102,6 @@ func isReservedHeader(hdr string) bool {
 		"grpc-message",
 		"grpc-status",
 		"grpc-timeout",
-		"grpc-status-details-bin",
 		// Intentionally exclude grpc-previous-rpc-attempts and
 		// grpc-retry-pushback-ms, which are "reserved", but their API
 		// intentionally works via metadata.
@@ -154,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) {
 	return v, nil
 }
 
-func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
-	v, err := decodeBinHeader(rawDetails)
-	if err != nil {
-		return nil, err
-	}
-	st := &spb.Status{}
-	if err = proto.Unmarshal(v, st); err != nil {
-		return nil, err
-	}
-	return status.FromProto(st), nil
-}
-
 type timeoutUnit uint8
 
 const (
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 74a811fc..aac056e7 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -698,7 +698,7 @@ type ClientTransport interface {
 // Write methods for a given Stream will be called serially.
 type ServerTransport interface {
 	// HandleStreams receives incoming streams using the given handler.
-	HandleStreams(func(*Stream), func(context.Context, string) context.Context)
+	HandleStreams(func(*Stream))
 
 	// WriteHeader sends the header metadata for the given stream.
 	// WriteHeader may not be called on all streams.
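[Editor's note — illustration only, not part of the diff. The transport changes above consolidate all grpc-status-details-bin handling behind one unexported constant and one decoder: the server emits the header only for a final status that actually carries detail protos (dropping any user-set trailer of the same name first), and the client decodes it via istatus.NewWithProto, reporting a grpc-status/details code mismatch as codes.Internal. A self-contained, hedged sketch of how such a status is produced and consumed with the public API; the package layout, function names, and message contents are illustrative assumptions:]

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// failWithDetails returns an error whose status carries a detail proto.
// Returned from a gRPC handler, this is what makes WriteStatus marshal
// status.Proto() into the grpc-status-details-bin trailer seen above.
func failWithDetails() error {
	st := status.New(codes.InvalidArgument, "validation failed")
	st, err := st.WithDetails(&errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{
			{Field: "name", Description: "must not be empty"},
		},
	})
	if err != nil {
		// WithDetails fails only if the detail proto cannot be marshaled.
		return status.Error(codes.Internal, "attaching details failed")
	}
	return st.Err()
}

func main() {
	err := failWithDetails()
	fmt.Println(err)
	// The detail protos survive the round trip through the error value:
	fmt.Println(status.Convert(err).Details())
}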
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index eeae92fb..8f60d421 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -983,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
 		f := func() {
 			defer streamQuota.release()
 			defer wg.Done()
-			s.handleStream(st, stream, s.traceInfo(st, stream))
+			s.handleStream(st, stream)
 		}
 
 		if s.opts.numServerWorkers > 0 {
@@ -995,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
 			}
 		}
 		go f()
-	}, func(ctx context.Context, method string) context.Context {
-		if !EnableTracing {
-			return ctx
-		}
-		tr := trace.New("grpc.Recv."+methodFamily(method), method)
-		return trace.NewContext(ctx, tr)
 	})
 	wg.Wait()
 }
@@ -1049,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	s.serveStreams(st)
 }
 
-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
-// If tracing is not enabled, it returns nil.
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
-	if !EnableTracing {
-		return nil
-	}
-	tr, ok := trace.FromContext(stream.Context())
-	if !ok {
-		return nil
-	}
-
-	trInfo = &traceInfo{
-		tr: tr,
-		firstLine: firstLine{
-			client:     false,
-			remoteAddr: st.RemoteAddr(),
-		},
-	}
-	if dl, ok := stream.Context().Deadline(); ok {
-		trInfo.firstLine.deadline = time.Until(dl)
-	}
-	return trInfo
-}
-
 func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
@@ -1133,7 +1103,7 @@ func (s *Server) incrCallsFailed() {
 	atomic.AddInt64(&s.czData.callsFailed, 1)
 }
 
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
 	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
 	if err != nil {
 		channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
@@ -1152,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
 	err = t.Write(stream, hdr, payload, opts)
 	if err == nil {
 		for _, sh := range s.opts.statsHandlers {
-			sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
+			sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
 		}
 	}
 	return err
@@ -1194,7 +1164,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
 	}
 }
 
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 	shs := s.opts.statsHandlers
 	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
 		if channelz.IsOn() {
@@ -1208,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 				IsClientStream: false,
 				IsServerStream: false,
 			}
-			sh.HandleRPC(stream.Context(), statsBegin)
+			sh.HandleRPC(ctx, statsBegin)
 		}
 		if trInfo != nil {
 			trInfo.tr.LazyLog(&trInfo.firstLine, false)
@@ -1240,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			if err != nil && err != io.EOF {
 				end.Error = toRPCErr(err)
 			}
-			sh.HandleRPC(stream.Context(), end)
+			sh.HandleRPC(ctx, end)
 		}
 
 		if channelz.IsOn() {
@@ -1262,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		}
 	}
 	if len(binlogs) != 0 {
-		ctx := stream.Context()
 		md, _ := metadata.FromIncomingContext(ctx)
 		logEntry := &binarylog.ClientHeader{
 			Header: md,
@@ -1348,7 +1317,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 	}
 	for _, sh := range shs {
-		sh.HandleRPC(stream.Context(), &stats.InPayload{
+		sh.HandleRPC(ctx, &stats.InPayload{
 			RecvTime: time.Now(),
 			Payload:  v,
 			Length:   len(d),
@@ -1362,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			Message: d,
 		}
 		for _, binlog := range binlogs {
-			binlog.Log(stream.Context(), cm)
+			binlog.Log(ctx, cm)
 		}
 	}
 	if trInfo != nil {
@@ -1370,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 		}
 		return nil
 	}
-	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+	ctx = NewContextWithServerTransportStream(ctx, stream)
 	reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
 	if appErr != nil {
 		appStatus, ok := status.FromError(appErr)
@@ -1395,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 				Header: h,
 			}
 			for _, binlog := range binlogs {
-				binlog.Log(stream.Context(), sh)
+				binlog.Log(ctx, sh)
 			}
 		}
 		st := &binarylog.ServerTrailer{
@@ -1403,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			Err: appErr,
 		}
 		for _, binlog := range binlogs {
-			binlog.Log(stream.Context(), st)
+			binlog.Log(ctx, st)
 		}
 	}
 	return appErr
@@ -1418,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 	if stream.SendCompress() != sendCompressorName {
 		comp = encoding.GetCompressor(stream.SendCompress())
 	}
-	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
+	if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil {
 		if err == io.EOF {
 			// The entire stream is done (for unary RPC only).
 			return err
@@ -1445,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			Err: appErr,
 		}
 		for _, binlog := range binlogs {
-			binlog.Log(stream.Context(), sh)
-			binlog.Log(stream.Context(), st)
+			binlog.Log(ctx, sh)
+			binlog.Log(ctx, st)
 		}
 	}
 	return err
@@ -1460,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			Message: reply,
 		}
 		for _, binlog := range binlogs {
-			binlog.Log(stream.Context(), sh)
-			binlog.Log(stream.Context(), sm)
+			binlog.Log(ctx, sh)
+			binlog.Log(ctx, sm)
 		}
 	}
 	if channelz.IsOn() {
@@ -1479,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 			Err: appErr,
 		}
 		for _, binlog := range binlogs {
-			binlog.Log(stream.Context(), st)
+			binlog.Log(ctx, st)
 		}
 	}
 	return t.WriteStatus(stream, statusOK)
@@ -1521,7 +1490,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf
 	}
 }
 
-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 	if channelz.IsOn() {
 		s.incrCallsStarted()
 	}
@@ -1535,10 +1504,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 			IsServerStream: sd.ServerStreams,
 		}
 		for _, sh := range shs {
-			sh.HandleRPC(stream.Context(), statsBegin)
+			sh.HandleRPC(ctx, statsBegin)
 		}
 	}
-	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+	ctx = NewContextWithServerTransportStream(ctx, stream)
 	ss := &serverStream{
 		ctx: ctx,
 		t:   t,
@@ -1574,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 			end.Error = toRPCErr(err)
 		}
 		for _, sh := range shs {
-			sh.HandleRPC(stream.Context(), end)
+			sh.HandleRPC(ctx, end)
 		}
 	}
 
@@ -1616,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 			logEntry.PeerAddr = peer.Addr
 		}
 		for _, binlog := range ss.binlogs {
-			binlog.Log(stream.Context(), logEntry)
+			binlog.Log(ctx, logEntry)
 		}
 	}
 
@@ -1694,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 			Err: appErr,
 		}
 		for _, binlog := range ss.binlogs {
-			binlog.Log(stream.Context(), st)
+			binlog.Log(ctx, st)
 		}
 	}
 	t.WriteStatus(ss.s, appStatus)
@@ -1712,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 			Err: appErr,
 		}
 		for _, binlog := range ss.binlogs {
-			binlog.Log(stream.Context(), st)
+			binlog.Log(ctx, st)
 		}
 	}
 	return t.WriteStatus(ss.s, statusOK)
 }
 
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
+	ctx := stream.Context()
+	var ti *traceInfo
+	if EnableTracing {
+		tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
+		ctx = trace.NewContext(ctx, tr)
+		ti = &traceInfo{
+			tr: tr,
+			firstLine: firstLine{
+				client:     false,
+				remoteAddr: t.RemoteAddr(),
+			},
+		}
+		if dl, ok := ctx.Deadline(); ok {
+			ti.firstLine.deadline = time.Until(dl)
+		}
+	}
+
 	sm := stream.Method()
 	if sm != "" && sm[0] == '/' {
 		sm = sm[1:]
 	}
 	pos := strings.LastIndex(sm, "/")
 	if pos == -1 {
-		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
-			trInfo.tr.SetError()
+		if ti != nil {
+			ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
+			ti.tr.SetError()
 		}
 		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
 		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
-			if trInfo != nil {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
-				trInfo.tr.SetError()
+			if ti != nil {
+				ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+				ti.tr.SetError()
			}
 			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
 		}
-		if trInfo != nil {
-			trInfo.tr.Finish()
+		if ti != nil {
+			ti.tr.Finish()
 		}
 		return
 	}
@@ -1748,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 	srv, knownService := s.services[service]
 	if knownService {
 		if md, ok := srv.methods[method]; ok {
-			s.processUnaryRPC(t, stream, srv, md, trInfo)
+			s.processUnaryRPC(ctx, t, stream, srv, md, ti)
 			return
 		}
 		if sd, ok := srv.streams[method]; ok {
-			s.processStreamingRPC(t, stream, srv, sd, trInfo)
+			s.processStreamingRPC(ctx, t, stream, srv, sd, ti)
 			return
 		}
 	}
 	// Unknown service, or known server unknown method.
 	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
-		s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+		s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti)
 		return
 	}
 	var errDesc string
@@ -1767,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 	} else {
 		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
 	}
-	if trInfo != nil {
-		trInfo.tr.LazyPrintf("%s", errDesc)
-		trInfo.tr.SetError()
+	if ti != nil {
+		ti.tr.LazyPrintf("%s", errDesc)
+		ti.tr.SetError()
 	}
 	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
-		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
-			trInfo.tr.SetError()
+		if ti != nil {
+			ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+			ti.tr.SetError()
 		}
 		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
 	}
-	if trInfo != nil {
-		trInfo.tr.Finish()
+	if ti != nil {
+		ti.tr.Finish()
 	}
 }
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
index bfa5dfa4..07f01257 100644
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -27,6 +27,8 @@
 package tap
 
 import (
 	"context"
+
+	"google.golang.org/grpc/metadata"
 )
 
 // Info defines the relevant information needed by the handles.
@@ -34,6 +36,10 @@ type Info struct {
 	// FullMethodName is the string of grpc method (in the format of
 	// /package.service/method).
 	FullMethodName string
+
+	// Header contains the header metadata received.
+	Header metadata.MD
+
 	// TODO: More to be added.
 }
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 724ad210..6d2cadd7 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.58.3"
+const Version = "1.59.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index bbc9e2e3..bb480f1f 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -93,6 +93,9 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc
 # - Ensure all ptypes proto packages are renamed when importing.
 not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
 
+# - Ensure all usages of grpc_testing package are renamed when importing.
+not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"
+
 # - Ensure all xds proto imports are renamed to *pb or *grpc.
 git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4d903dc7..e2683ba2 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -18,7 +18,7 @@ github.com/Microsoft/go-winio/internal/socket
 github.com/Microsoft/go-winio/internal/stringbuffer
 github.com/Microsoft/go-winio/pkg/guid
 github.com/Microsoft/go-winio/vhd
-# github.com/Microsoft/hcsshim v0.12.0-rc.1
+# github.com/Microsoft/hcsshim v0.12.0-rc.3
 ## explicit; go 1.18
 github.com/Microsoft/hcsshim
 github.com/Microsoft/hcsshim/computestorage
@@ -80,12 +80,15 @@ github.com/chzyer/readline
 # github.com/containerd/cgroups/v3 v3.0.2
 ## explicit; go 1.18
 github.com/containerd/cgroups/v3/cgroup1/stats
-# github.com/containerd/containerd v1.7.9
+# github.com/containerd/containerd v1.7.13
 ## explicit; go 1.19
 github.com/containerd/containerd/errdefs
 github.com/containerd/containerd/log
 github.com/containerd/containerd/pkg/userns
 github.com/containerd/containerd/platforms
+# github.com/containerd/errdefs v0.1.0
+## explicit; go 1.20
+github.com/containerd/errdefs
 # github.com/containerd/log v0.1.0
 ## explicit; go 1.20
 github.com/containerd/log
@@ -108,7 +111,7 @@ github.com/containernetworking/cni/pkg/types/create
 github.com/containernetworking/cni/pkg/types/internal
 github.com/containernetworking/cni/pkg/utils
 github.com/containernetworking/cni/pkg/version
-# github.com/containernetworking/plugins v1.3.0
+# github.com/containernetworking/plugins v1.4.0
 ## explicit; go 1.20
 github.com/containernetworking/plugins/pkg/ns
 # github.com/containers/buildah v1.33.5
@@ -137,8 +140,9 @@ github.com/containers/buildah/pkg/rusage
 github.com/containers/buildah/pkg/sshagent
 github.com/containers/buildah/pkg/util
 github.com/containers/buildah/util
-# github.com/containers/common v0.57.4
-## explicit; go 1.18
+# github.com/containers/common v0.58.0
+## explicit; go 1.20
+github.com/containers/common/internal
 github.com/containers/common/internal/attributedstring
 github.com/containers/common/libimage
 github.com/containers/common/libimage/define
@@ -147,6 +151,7 @@ github.com/containers/common/libimage/manifests
 github.com/containers/common/libimage/platform
 github.com/containers/common/libnetwork/cni
 github.com/containers/common/libnetwork/etchosts
+github.com/containers/common/libnetwork/internal/rootlessnetns
 github.com/containers/common/libnetwork/internal/util
 github.com/containers/common/libnetwork/netavark
 github.com/containers/common/libnetwork/network
@@ -191,6 +196,7 @@ github.com/containers/common/pkg/ssh
 github.com/containers/common/pkg/subscriptions
 github.com/containers/common/pkg/supplemented
 github.com/containers/common/pkg/sysinfo
+github.com/containers/common/pkg/systemd
 github.com/containers/common/pkg/timetype
 github.com/containers/common/pkg/umask
 github.com/containers/common/pkg/util
@@ -199,7 +205,7 @@ github.com/containers/common/version
 # github.com/containers/conmon v2.0.20+incompatible
 ## explicit
 github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.29.2
+# github.com/containers/image/v5 v5.30.0
 ## explicit; go 1.19
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
@@ -383,8 +389,8 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.51.0
-## explicit; go 1.19
+# github.com/containers/storage v1.53.0
+## explicit; go 1.20
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -404,12 +410,14 @@ github.com/containers/storage/pkg/chunked
 github.com/containers/storage/pkg/chunked/compressor
 github.com/containers/storage/pkg/chunked/dump
 github.com/containers/storage/pkg/chunked/internal
+github.com/containers/storage/pkg/chunked/toc
 github.com/containers/storage/pkg/config
 github.com/containers/storage/pkg/devicemapper
 github.com/containers/storage/pkg/directory
 github.com/containers/storage/pkg/dmesg
 github.com/containers/storage/pkg/fileutils
 github.com/containers/storage/pkg/fsutils
+github.com/containers/storage/pkg/fsverity
 github.com/containers/storage/pkg/homedir
 github.com/containers/storage/pkg/idmap
 github.com/containers/storage/pkg/idtools
@@ -443,7 +451,7 @@ github.com/coreos/go-systemd/v22/dbus
 github.com/coreos/go-systemd/v22/internal/dlopen
 github.com/coreos/go-systemd/v22/journal
 github.com/coreos/go-systemd/v22/sdjournal
-# github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46
+# github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f
 ## explicit
 github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer
 # github.com/cyphar/filepath-securejoin v0.2.4
@@ -460,15 +468,15 @@ github.com/disiqueira/gotree/v3
 github.com/distribution/reference
 # github.com/docker/distribution v2.8.3+incompatible
 ## explicit
-github.com/docker/distribution/reference
 github.com/docker/distribution/registry/api/errcode
 github.com/docker/distribution/registry/api/v2
 github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v24.0.7+incompatible
+# github.com/docker/docker v25.0.3+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
 github.com/docker/docker/api/types/blkiodev
+github.com/docker/docker/api/types/checkpoint
 github.com/docker/docker/api/types/container
 github.com/docker/docker/api/types/events
 github.com/docker/docker/api/types/filters
@@ -479,11 +487,14 @@ github.com/docker/docker/api/types/registry
 github.com/docker/docker/api/types/strslice
 github.com/docker/docker/api/types/swarm
 github.com/docker/docker/api/types/swarm/runtime
+github.com/docker/docker/api/types/system
 github.com/docker/docker/api/types/time
 github.com/docker/docker/api/types/versions
 github.com/docker/docker/api/types/volume
 github.com/docker/docker/client
 github.com/docker/docker/errdefs
+github.com/docker/docker/image/spec/specs-go/v1
+github.com/docker/docker/internal/multierror
 github.com/docker/docker/pkg/archive
 github.com/docker/docker/pkg/homedir
 github.com/docker/docker/pkg/idtools
@@ -494,15 +505,14 @@ github.com/docker/docker/pkg/meminfo
 github.com/docker/docker/pkg/namesgenerator
 github.com/docker/docker/pkg/parsers
 github.com/docker/docker/pkg/pools
-github.com/docker/docker/pkg/process
 github.com/docker/docker/pkg/stdcopy
 github.com/docker/docker/pkg/system
-# github.com/docker/docker-credential-helpers v0.8.0
+# github.com/docker/docker-credential-helpers v0.8.1
 ## explicit; go 1.19
 github.com/docker/docker-credential-helpers/client
 github.com/docker/docker-credential-helpers/credentials
-# github.com/docker/go-connections v0.4.1-0.20231031175723-0b8c1f4e07a0
-## explicit; go 1.13
+# github.com/docker/go-connections v0.5.0
+## explicit; go 1.18
 github.com/docker/go-connections/nat
 github.com/docker/go-connections/sockets
 github.com/docker/go-connections/tlsconfig
@@ -513,13 +523,16 @@ github.com/docker/go-plugins-helpers/volume
 # github.com/docker/go-units v0.5.0
 ## explicit
 github.com/docker/go-units
+# github.com/felixge/httpsnoop v1.0.4
+## explicit; go 1.13
+github.com/felixge/httpsnoop
 # github.com/fsnotify/fsnotify v1.7.0
 ## explicit; go 1.17
 github.com/fsnotify/fsnotify
 # github.com/fsouza/go-dockerclient v1.10.0
 ## explicit; go 1.20
 github.com/fsouza/go-dockerclient
-# github.com/go-jose/go-jose/v3 v3.0.1
+# github.com/go-jose/go-jose/v3 v3.0.2
 ## explicit; go 1.12
 github.com/go-jose/go-jose/v3
 github.com/go-jose/go-jose/v3/cipher
@@ -535,6 +548,9 @@ github.com/go-logfmt/logfmt
 ## explicit; go 1.18
 github.com/go-logr/logr
 github.com/go-logr/logr/funcr
+# github.com/go-logr/stdr v1.2.2
+## explicit; go 1.16
+github.com/go-logr/stdr
 # github.com/go-openapi/analysis v0.21.4
 ## explicit; go 1.13
 github.com/go-openapi/analysis
@@ -544,8 +560,8 @@ github.com/go-openapi/analysis/internal/flatten/operations
 github.com/go-openapi/analysis/internal/flatten/replace
 github.com/go-openapi/analysis/internal/flatten/schutils
 github.com/go-openapi/analysis/internal/flatten/sortref
-# github.com/go-openapi/errors v0.20.4
-## explicit; go 1.14
+# github.com/go-openapi/errors v0.21.1
+## explicit; go 1.19
 github.com/go-openapi/errors
 # github.com/go-openapi/jsonpointer v0.19.6
 ## explicit; go 1.13
@@ -563,11 +579,11 @@ github.com/go-openapi/runtime
 # github.com/go-openapi/spec v0.20.9
 ## explicit; go 1.13
 github.com/go-openapi/spec
-# github.com/go-openapi/strfmt v0.21.7
+# github.com/go-openapi/strfmt v0.22.2
 ## explicit; go 1.19
 github.com/go-openapi/strfmt
-# github.com/go-openapi/swag v0.22.4
-## explicit; go 1.18
+# github.com/go-openapi/swag v0.22.10
+## explicit; go 1.19
 github.com/go-openapi/swag
 # github.com/go-openapi/validate v0.22.1
 ## explicit; go 1.14
@@ -599,7 +615,7 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.16.1
+# github.com/google/go-containerregistry v0.19.0
 ## explicit; go 1.18
 github.com/google/go-containerregistry/pkg/name
 github.com/google/go-containerregistry/pkg/v1
@@ -617,7 +633,7 @@ github.com/google/pprof/profile
 # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 ## explicit; go 1.13
 github.com/google/shlex
-# github.com/google/uuid v1.4.0
+# github.com/google/uuid v1.6.0
 ## explicit
 github.com/google/uuid
 # github.com/gorilla/mux v1.8.1
@@ -647,8 +663,8 @@ github.com/jpillora/backoff
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.3
-## explicit; go 1.19
+# github.com/klauspost/compress v1.17.7
+## explicit; go 1.20
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
@@ -663,14 +679,14 @@ github.com/klauspost/pgzip
 # github.com/kr/fs v0.1.0
 ## explicit
 github.com/kr/fs
-# github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6
-## explicit; go 1.18
+# github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e
+## explicit; go 1.20
 github.com/letsencrypt/boulder/core
-github.com/letsencrypt/boulder/errors
 github.com/letsencrypt/boulder/goodkey
 github.com/letsencrypt/boulder/identifier
 github.com/letsencrypt/boulder/probs
 github.com/letsencrypt/boulder/revocation
+github.com/letsencrypt/boulder/strictyaml
 # github.com/mailru/easyjson v0.7.7
 ## explicit; go 1.12
 github.com/mailru/easyjson/buffer
@@ -687,8 +703,8 @@ github.com/mattn/go-runewidth
 # github.com/mattn/go-shellwords v1.0.12
 ## explicit; go 1.13
 github.com/mattn/go-shellwords
-# github.com/mattn/go-sqlite3 v1.14.18
-## explicit; go 1.16
+# github.com/mattn/go-sqlite3 v1.14.22
+## explicit; go 1.19
 github.com/mattn/go-sqlite3
 # github.com/miekg/pkcs11 v1.1.1
 ## explicit; go 1.12
@@ -714,6 +730,9 @@ github.com/moby/sys/mountinfo
 # github.com/moby/sys/sequential v0.5.0
 ## explicit; go 1.17
 github.com/moby/sys/sequential
+# github.com/moby/sys/user v0.1.0
+## explicit; go 1.17
+github.com/moby/sys/user
 # github.com/moby/term v0.5.0
 ## explicit; go 1.18
 github.com/moby/term
@@ -777,12 +796,11 @@ github.com/onsi/gomega/types
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
-github.com/opencontainers/go-digest/digestset
-# github.com/opencontainers/image-spec v1.1.0-rc5
+# github.com/opencontainers/image-spec v1.1.0
 ## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.10 => github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23
+# github.com/opencontainers/runc v1.1.12 => github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23
 ## explicit; go 1.20
 github.com/opencontainers/runc/libcontainer/apparmor
 github.com/opencontainers/runc/libcontainer/cgroups
@@ -794,7 +812,7 @@ github.com/opencontainers/runc/libcontainer/devices
 github.com/opencontainers/runc/libcontainer/user
 github.com/opencontainers/runc/libcontainer/userns
 github.com/opencontainers/runc/libcontainer/utils
-# github.com/opencontainers/runtime-spec v1.1.1-0.20230922153023-c0e90434df2a
+# github.com/opencontainers/runtime-spec v1.2.0
 ## explicit
 github.com/opencontainers/runtime-spec/specs-go
 # github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc
@@ -863,7 +881,7 @@ github.com/rivo/uniseg
 # github.com/seccomp/libseccomp-golang v0.10.0
 ## explicit; go 1.14
 github.com/seccomp/libseccomp-golang
-# github.com/secure-systems-lab/go-securesystemslib v0.7.0
+# github.com/secure-systems-lab/go-securesystemslib v0.8.0
 ## explicit; go 1.20
 github.com/secure-systems-lab/go-securesystemslib/encrypted
 # github.com/sigstore/fulcio v1.4.3
@@ -872,7 +890,7 @@ github.com/sigstore/fulcio/pkg/certificate
 # github.com/sigstore/rekor v1.2.2
 ## explicit; go 1.19
 github.com/sigstore/rekor/pkg/generated/models
-# github.com/sigstore/sigstore v1.7.5
+# github.com/sigstore/sigstore v1.8.2
 ## explicit; go 1.20
 github.com/sigstore/sigstore/pkg/cryptoutils
 github.com/sigstore/sigstore/pkg/signature
@@ -890,7 +908,7 @@ github.com/spf13/pflag
 # github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
 ## explicit
 github.com/stefanberger/go-pkcs11uri
-# github.com/sylabs/sif/v2 v2.15.0
+# github.com/sylabs/sif/v2 v2.15.1
 ## explicit; go 1.20
 github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
@@ -913,7 +931,7 @@ github.com/ulikunitz/xz/lzma
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v8 v8.6.2
+# github.com/vbauerster/mpb/v8 v8.7.2
 ## explicit; go 1.17
 github.com/vbauerster/mpb/v8
 github.com/vbauerster/mpb/v8/cwriter
@@ -926,11 +944,11 @@ github.com/vishvananda/netlink/nl
 # github.com/vishvananda/netns v0.0.4
 ## explicit; go 1.17
 github.com/vishvananda/netns
-# go.etcd.io/bbolt v1.3.8
+# go.etcd.io/bbolt v1.3.9
 ## explicit; go 1.17
 go.etcd.io/bbolt
-# go.mongodb.org/mongo-driver v1.11.3
-## explicit; go 1.13
+# go.mongodb.org/mongo-driver v1.14.0
+## explicit; go 1.18
 go.mongodb.org/mongo-driver/bson
 go.mongodb.org/mongo-driver/bson/bsoncodec
 go.mongodb.org/mongo-driver/bson/bsonoptions
@@ -948,7 +966,30 @@ go.opencensus.io/internal
 go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.18.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0
+## explicit; go 1.19
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
+# go.opentelemetry.io/otel v1.19.0
+## explicit; go 1.20
+go.opentelemetry.io/otel
+go.opentelemetry.io/otel/attribute
+go.opentelemetry.io/otel/baggage
+go.opentelemetry.io/otel/codes
+go.opentelemetry.io/otel/internal
+go.opentelemetry.io/otel/internal/attribute
+go.opentelemetry.io/otel/internal/baggage
+go.opentelemetry.io/otel/internal/global
+go.opentelemetry.io/otel/propagation
+go.opentelemetry.io/otel/semconv/v1.17.0
+# go.opentelemetry.io/otel/metric v1.19.0
+## explicit; go 1.20
+go.opentelemetry.io/otel/metric
+go.opentelemetry.io/otel/metric/embedded
+# go.opentelemetry.io/otel/trace v1.19.0
+## explicit; go 1.20
+go.opentelemetry.io/otel/trace
+# golang.org/x/crypto v0.21.0
 ## explicit; go 1.18
 golang.org/x/crypto/argon2
 golang.org/x/crypto/bcrypt
@@ -980,15 +1021,15 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/knownhosts
 golang.org/x/crypto/twofish
 golang.org/x/crypto/xts
-# golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+# golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
 ## explicit; go 1.20
 golang.org/x/exp/constraints
 golang.org/x/exp/maps
 golang.org/x/exp/slices
-# golang.org/x/mod v0.14.0
+# golang.org/x/mod v0.15.0
 ## explicit; go 1.18
 golang.org/x/mod/semver
-# golang.org/x/net v0.20.0
+# golang.org/x/net v0.22.0
 ## explicit; go 1.18
 golang.org/x/net/context
 golang.org/x/net/html
@@ -1003,7 +1044,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.16.0
+# golang.org/x/oauth2 v0.18.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/clientcredentials
@@ -1012,14 +1053,14 @@ golang.org/x/oauth2/internal
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.16.0
+# golang.org/x/sys v0.18.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/term v0.16.0
+# golang.org/x/term v0.18.0
 ## explicit; go 1.18
 golang.org/x/term
 # golang.org/x/text v0.14.0
@@ -1044,7 +1085,7 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.16.1
+# golang.org/x/tools v0.18.0
 ## explicit; go 1.18
 golang.org/x/tools/cmd/stringer
 golang.org/x/tools/go/ast/inspector
@@ -1074,10 +1115,10 @@ google.golang.org/appengine/internal/log
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.58.3
+# google.golang.org/grpc v1.59.0
 ## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes