diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 61ca936db..b0cfcd904 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -33,7 +33,7 @@ jobs:
         make lint
         make gofmt
 
-  tests:
+  tests_podmanv4:
    runs-on: ubuntu-latest
    container:
      image: quay.io/containers/podman:v4
@@ -52,6 +52,25 @@ jobs:
        make .install.ginkgo
        make test
 
+  tests_podmanv5:
+    runs-on: ubuntu-latest
+    container:
+      image: quay.io/podman/upstream:latest
+      volumes:
+        - container_volume:/var/lib/containers
+      options: "--device /dev/fuse:rw --security-opt label=disable --security-opt seccomp=unconfined --privileged"
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '1.21.4'
+      - uses: actions/checkout@v3
+      - run: |
+          yum -y install make gcc glib2-devel glibc-devel glibc-static device-mapper-devel
+          export GOBIN=$(pwd)/bin/
+          export CGO_ENABLED=1
+          make .install.ginkgo
+          make test
+
   build_binary:
    runs-on: ubuntu-latest
    container:
diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml
index b8b523d23..7a8401c15 100644
--- a/.github/workflows/pr.yml
+++ b/.github/workflows/pr.yml
@@ -69,7 +69,7 @@ jobs:
         make lint
         make gofmt
 
-  tests:
+  tests_podmanv4:
    runs-on: ubuntu-latest
    container:
      image: quay.io/containers/podman:v4
@@ -88,6 +88,25 @@ jobs:
        make .install.ginkgo
        make test
 
+  tests_podmanv5:
+    runs-on: ubuntu-latest
+    container:
+      image: quay.io/podman/upstream:latest
+      volumes:
+        - container_volume:/var/lib/containers
+      options: "--device /dev/fuse:rw --security-opt label=disable --security-opt seccomp=unconfined --privileged"
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '1.21.4'
+      - uses: actions/checkout@v3
+      - run: |
+          yum -y install make gcc glib2-devel glibc-devel glibc-static device-mapper-devel
+          export GOBIN=$(pwd)/bin/
+          export CGO_ENABLED=1
+          make .install.ginkgo
+          make test
+
   build_binary:
    runs-on: ubuntu-latest
    container:
diff --git a/README.md b/README.md
index 0e1e41e59..2684aa48f 100644
--- a/README.md
+++ b/README.md
@@ -5,9 +5,9 @@
 [![Go Report](https://img.shields.io/badge/go%20report-A%2B-brightgreen.svg)](https://goreportcard.com/report/github.com/containers/prometheus-podman-exporter)
 ![Go](https://github.com/containers/prometheus-podman-exporter/workflows/Go/badge.svg)
 
-Prometheus exporter for podman v4.x environment exposing containers, pods, images, volumes and networks information.
+Prometheus exporter for podman v4 and v5 environments, exposing information on containers, pods, images, volumes and networks.
 
-prometheus-podman-exporter uses the podman v4.x (libpod) library to fetch the statistics and therefore no need to enable podman.socket service unless using the container image.
+prometheus-podman-exporter uses the podman (libpod) library to fetch the statistics, so there is no need to enable the podman.socket service unless you are using the container image.
 
 - [**Installation**](#installation)
 - [**Usage and Options**](#usage-and-options)
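The README hunk above distinguishes the default in-process libpod mode from the container-image deployment, which talks to podman.socket instead. A minimal sketch of that socket-backed mode, assuming the podman v5 bindings package and the rootful socket path (the exporter's actual wiring is not part of this diff):

```go
// Sketch only: illustrates the remote, socket-based mode that the README
// says requires podman.socket; the exporter itself links libpod in-process.
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v5/pkg/bindings"
	"github.com/containers/podman/v5/pkg/bindings/containers"
)

func main() {
	// Assumes the rootful API socket; rootless setups would use
	// $XDG_RUNTIME_DIR/podman/podman.sock instead.
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	// List containers over the API socket, analogous to what the
	// exporter's container collector gathers in-process.
	list, err := containers.List(ctx, nil)
	if err != nil {
		panic(err)
	}
	for _, c := range list {
		fmt.Println(c.Names, c.State)
	}
}
```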
diff --git a/go.mod b/go.mod
index d845e518b..008e8ab69 100644
--- a/go.mod
+++ b/go.mod
@@ -3,9 +3,9 @@ module github.com/containers/prometheus-podman-exporter
 go 1.20
 
 require (
-	github.com/containers/common v0.57.4
-	github.com/containers/image/v5 v5.29.2
-	github.com/containers/podman/v4 v4.9.3
+	github.com/containers/common v0.58.0
+	github.com/containers/image/v5 v5.30.0
+	github.com/containers/podman/v5 v5.0.0
 	github.com/go-kit/log v0.2.1
 	github.com/onsi/ginkgo/v2 v2.17.0
 	github.com/onsi/gomega v1.32.0
@@ -22,7 +22,7 @@ require (
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/Microsoft/hcsshim v0.12.0-rc.1 // indirect
+	github.com/Microsoft/hcsshim v0.12.0-rc.3 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
@@ -33,47 +33,50 @@ require (
 	github.com/checkpoint-restore/checkpointctl v1.1.0 // indirect
 	github.com/checkpoint-restore/go-criu/v7 v7.0.0 // indirect
 	github.com/chzyer/readline v1.5.1 // indirect
-	github.com/containerd/cgroups/v3 v3.0.2 // indirect
-	github.com/containerd/containerd v1.7.9 // indirect
+	github.com/containerd/cgroups/v3 v3.0.3 // indirect
+	github.com/containerd/containerd v1.7.13 // indirect
+	github.com/containerd/errdefs v0.1.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
 	github.com/containerd/typeurl/v2 v2.1.1 // indirect
 	github.com/containernetworking/cni v1.1.2 // indirect
-	github.com/containernetworking/plugins v1.3.0 // indirect
-	github.com/containers/buildah v1.33.5 // indirect
+	github.com/containernetworking/plugins v1.4.0 // indirect
+	github.com/containers/buildah v1.35.1 // indirect
 	github.com/containers/conmon v2.0.20+incompatible // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b // indirect
+	github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c // indirect
 	github.com/containers/ocicrypt v1.1.9 // indirect
-	github.com/containers/psgo v1.8.0 // indirect
-	github.com/containers/storage v1.51.0 // indirect
+	github.com/containers/psgo v1.9.0 // indirect
+	github.com/containers/storage v1.53.0 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 // indirect
-	github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
 	github.com/distribution/reference v0.5.0 // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker v24.0.7+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.8.0 // indirect
-	github.com/docker/go-connections v0.4.1-0.20231031175723-0b8c1f4e07a0 // indirect
+	github.com/docker/docker v25.0.3+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.8.1 // indirect
+	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
-	github.com/fsouza/go-dockerclient v1.10.0 // indirect
-	github.com/go-jose/go-jose/v3 v3.0.1 // indirect
+	github.com/fsouza/go-dockerclient v1.10.1 // indirect
+	github.com/go-jose/go-jose/v3 v3.0.3 // indirect
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
 	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
-	github.com/go-openapi/errors v0.20.4 // indirect
+	github.com/go-openapi/errors v0.21.1 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/loads v0.21.2 // indirect
 	github.com/go-openapi/runtime v0.26.0 // indirect
 	github.com/go-openapi/spec v0.20.9 // indirect
-	github.com/go-openapi/strfmt v0.21.7 // indirect
-	github.com/go-openapi/swag v0.22.4 // indirect
+	github.com/go-openapi/strfmt v0.22.2 // indirect
+	github.com/go-openapi/swag v0.22.10 // indirect
 	github.com/go-openapi/validate v0.22.1 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
@@ -81,14 +84,14 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
-	github.com/google/go-containerregistry v0.16.1 // indirect
+	github.com/google/go-containerregistry v0.19.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
-	github.com/google/uuid v1.4.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
-	github.com/gorilla/schema v1.2.0 // indirect
+	github.com/gorilla/schema v1.2.1 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -96,15 +99,15 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.3 // indirect
+	github.com/klauspost/compress v1.17.7 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/fs v0.1.0 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
-	github.com/mattn/go-sqlite3 v1.14.18 // indirect
+	github.com/mattn/go-sqlite3 v1.14.22 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -112,6 +115,7 @@ require (
 	github.com/moby/patternmatcher v0.6.0 // indirect
 	github.com/moby/sys/mountinfo v0.7.1 // indirect
 	github.com/moby/sys/sequential v0.5.0 // indirect
+	github.com/moby/sys/user v0.1.0 // indirect
 	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -120,59 +124,63 @@ require (
 	github.com/nxadm/tail v1.4.11 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
-	github.com/opencontainers/runc v1.1.10 // indirect
-	github.com/opencontainers/runtime-spec v1.1.1-0.20230922153023-c0e90434df2a // indirect
+	github.com/opencontainers/image-spec v1.1.0 // indirect
+	github.com/opencontainers/runc v1.1.12 // indirect
+	github.com/opencontainers/runtime-spec v1.2.0 // indirect
 	github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
-	github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722 // indirect
+	github.com/openshift/imagebuilder v1.2.6 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pkg/sftp v1.13.6 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
-	github.com/rivo/uniseg v0.4.4 // indirect
+	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 	github.com/sigstore/fulcio v1.4.3 // indirect
 	github.com/sigstore/rekor v1.2.2 // indirect
-	github.com/sigstore/sigstore v1.7.5 // indirect
+	github.com/sigstore/sigstore v1.8.2 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.15.0 // indirect
+	github.com/sylabs/sif/v2 v2.15.1 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.11 // indirect
 	github.com/vbatts/tar-split v0.11.5 // indirect
-	github.com/vbauerster/mpb/v8 v8.6.2 // indirect
+	github.com/vbauerster/mpb/v8 v8.7.2 // indirect
 	github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
 	github.com/vishvananda/netns v0.0.4 // indirect
-	go.etcd.io/bbolt v1.3.8 // indirect
-	go.mongodb.org/mongo-driver v1.11.3 // indirect
+	go.etcd.io/bbolt v1.3.9 // indirect
+	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	golang.org/x/crypto v0.18.0 // indirect
-	golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
-	golang.org/x/mod v0.14.0 // indirect
-	golang.org/x/net v0.20.0 // indirect
-	golang.org/x/oauth2 v0.16.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
+	go.opentelemetry.io/otel v1.22.0 // indirect
+	go.opentelemetry.io/otel/metric v1.22.0 // indirect
+	go.opentelemetry.io/otel/trace v1.22.0 // indirect
+	golang.org/x/crypto v0.21.0 // indirect
+	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
+	golang.org/x/mod v0.15.0 // indirect
+	golang.org/x/net v0.22.0 // indirect
+	golang.org/x/oauth2 v0.18.0 // indirect
 	golang.org/x/sync v0.6.0 // indirect
-	golang.org/x/sys v0.16.0 // indirect
-	golang.org/x/term v0.16.0 // indirect
+	golang.org/x/sys v0.18.0 // indirect
+	golang.org/x/term v0.18.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/tools v0.17.0 // indirect
+	golang.org/x/time v0.3.0 // indirect
+	golang.org/x/tools v0.18.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
-	google.golang.org/grpc v1.58.3 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
+	google.golang.org/grpc v1.61.0 // indirect
 	google.golang.org/protobuf v1.33.0 // indirect
-	gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
+	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/kubernetes v1.28.4 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 	tags.cncf.io/container-device-interface v0.6.2 // indirect
 	tags.cncf.io/container-device-interface/specs-go v0.6.0 // indirect
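The podman/v4 to podman/v5 require bump above is a major-version module path change, so every libpod import in consuming code moves with it. A minimal sketch of the mechanical rewrite, using pkg/domain/entities as an illustrative package (which packages the exporter actually imports is not visible in this diff):

```go
package collector

import (
	// before: "github.com/containers/podman/v4/pkg/domain/entities"
	// after — same package, new major-version module path:
	"github.com/containers/podman/v5/pkg/domain/entities"
)

// Compile-time reference showing the v5 package resolves;
// entities.ListContainer is the struct behind `podman ps --format json`.
var _ entities.ListContainer
```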
diff --git a/go.sum b/go.sum
index 323e73a1c..265b6770c 100644
--- a/go.sum
+++ b/go.sum
@@ -1,128 +1,38 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
-github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/Microsoft/hcsshim v0.12.0-rc.1 h1:Hy+xzYujv7urO5wrgcG58SPMOXNLrj4WCJbySs2XX/A=
-github.com/Microsoft/hcsshim v0.12.0-rc.1/go.mod h1:Y1a1S0QlYp1mBpyvGiuEdOfZqnao+0uX5AWHXQ5NhZU=
-github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
-github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak=
+github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 h1:5L8Mj9Co9sJVgW3TpYk2gxGJnDjsYuboNTcRmbtGKGs=
 github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6/go.mod h1:3HgLJ9d18kXMLQlJvIY3+FszZYMxCz8WfE2MQ7hDY0w=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/checkpoint-restore/checkpointctl v1.1.0 h1:plS/2zBzbAXO6DH/H+TqD7ZGhz8iQVb+NLgsOJSTWaw=
 github.com/checkpoint-restore/checkpointctl v1.1.0/go.mod h1:DtPd9M4bt/jdt+7DodFxm0lrzdevabk3cbni/FL4BY0=
-github.com/checkpoint-restore/go-criu/v6 v6.3.0/go.mod h1:rrRTN/uSwY2X+BPRl/gkulo9gsKOSAeVp9/K2tv7xZI=
 github.com/checkpoint-restore/go-criu/v7 v7.0.0 h1:R4UF/njKOuq8ooG7naFGsCeKsjv5j+rIhgFgSSeC2KY=
 github.com/checkpoint-restore/go-criu/v7 v7.0.0/go.mod h1:xD1v3cPww1QYpJR3+XTTdC8hYubPnptIPsT1daXhbr4=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -134,279 +44,113 @@ github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObk
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
 github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
-github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
-github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
-github.com/containerd/containerd v1.7.9 h1:KOhK01szQbM80YfW1H6RZKh85PHGqY/9OcEZ35Je8sc=
-github.com/containerd/containerd v1.7.9/go.mod h1:0/W44LWEYfSHoxBtsHIiNU/duEkgpMokemafHVCpq9Y=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
-github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
-github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
-github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
+github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
+github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is=
+github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4=
+github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
+github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
-github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
 github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
 github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
-github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
 github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
 github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
-github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
-github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
-github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
 github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
-github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
-github.com/containers/buildah v1.33.5 h1:BGzGAbK6B5VLOQlqX0V8DrteMbKGh6Q8ntHTahGqGMY=
-github.com/containers/buildah v1.33.5/go.mod h1:w3paMzMa5/3uaNSi2xq4hsQDTH9aFkoBmrHyPQ8Ixjo=
-github.com/containers/common v0.57.4 h1:kmfBad92kUjP5X44BPpOwMe+eZQqaKETfS+ASeL0g+g=
-github.com/containers/common v0.57.4/go.mod h1:o3L3CyOI9yr+JC8l4dZgvqTxcjs3qdKmkek00uchgvw=
+github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic=
+github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0=
+github.com/containers/buildah v1.35.1 h1:m4TF6V8b06cS4jH9/t39PUsUIjzDQg/P14FLpwjr40Y=
+github.com/containers/buildah v1.35.1/go.mod h1:vVSVUlTu8+99H5j43gBJscpkb/quZvdJg78+6X1HeTM=
+github.com/containers/common v0.58.0 h1:iQuwMxDD4ubZ9s1tmgdsiaHxMU4TdVBpV6kctJc6Bk8=
+github.com/containers/common v0.58.0/go.mod h1:l3vMqanJGj7tZ3W/i76gEJ128VXgFUO1tLaohJXPvdk=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.29.2 h1:b8U0XYWhaQbKucK73IbmSm8WQyKAhKDbAHQc45XlsOw=
-github.com/containers/image/v5 v5.29.2/go.mod h1:kQ7qcDsps424ZAz24thD+x7+dJw1vgur3A9tTDsj97E=
+github.com/containers/image/v5 v5.30.0 h1:CmHeSwI6W2kTRWnUsxATDFY5TEX4b58gPkaQcEyrLIA=
+github.com/containers/image/v5 v5.30.0/go.mod h1:gSD8MVOyqBspc0ynLsuiMR9qmt8UQ4jpVImjmK0uXfk=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
-github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b h1:8XvNAm+g7ivwPUkyiHvBs7z356JWpK9a0FDaek86+sY=
-github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b/go.mod h1:menB9p4o5HckgcLW6cO0+dl6+axkVmSqKlrNcratsh4=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c h1:6zalnZZODMOqNZBww9VAM1Mq5EZ3J+S8vYGCo2yg39M=
+github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c/go.mod h1:A/RMGaYhtzfW6L3whYRU+0GGEFocTYyQBqlWSb2UNEM=
 github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
 github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
-github.com/containers/podman/v4 v4.9.3 h1:3tEnvIqijxBYtILRdHcbn0UNHAyUiQ1Y5hcvkYmutZA=
-github.com/containers/podman/v4 v4.9.3/go.mod h1:J2qLop+mWjAOxh0QQyYPdnPA3jI6ay2eU0OKakgMniQ=
-github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
-github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
-github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
-github.com/containers/storage v1.51.0 h1:AowbcpiWXzAjHosKz7MKvPEqpyX+ryZA/ZurytRrFNA=
-github.com/containers/storage v1.51.0/go.mod h1:ybl8a3j1PPtpyaEi/5A6TOFs+5TrEyObeKJzVtkUlfc=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/containers/podman/v5 v5.0.0 h1:DVjeY4oTbI9qsxQfBTswerS8xvlAy0KywLN9yNc/MAA=
+github.com/containers/podman/v5 v5.0.0/go.mod h1:lJXhiseM72otkIcp0sVDUn9aFyScHqmHmlarbboRX4I=
+github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
+github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
+github.com/containers/storage v1.53.0 h1:VSES3C/u1pxjTJIXvLrSmyP7OBtDky04oGu07UvdTEA=
+github.com/containers/storage v1.53.0/go.mod h1:pujcoOSc+upx15Jirdkebhtd8uJiLwbSd/mYT6zDJK8=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc=
 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09/go.mod h1:m2r/smMKsKwgMSAoFKHaa68ImdCSNuKE1MxvQ64xuCQ=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
-github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
-github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
+github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
-github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
-github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
-github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWhkNRq8=
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
-github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
-github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
-github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
-github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-connections v0.4.1-0.20231031175723-0b8c1f4e07a0 h1:dPD5pdqsujF9jz2NQMQCDzrBSAF3M6kIxmfU98IOp9c=
-github.com/docker/go-connections v0.4.1-0.20231031175723-0b8c1f4e07a0/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
+github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
+github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
-github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
 github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 h1:YcvzLmdrP/b8kLAGJ8GT7bdncgCAiWxJZIlt84D+RJg=
 github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
-github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
-github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/fsouza/go-dockerclient v1.10.0 h1:ppSBsbR60I1DFbV4Ag7LlHlHakHFRNLk9XakATW1yVQ=
-github.com/fsouza/go-dockerclient v1.10.0/go.mod h1:+iNzAW78AzClIBTZ6WFjkaMvOgz68GyCJ236b1opLTs=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
-github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/fsouza/go-dockerclient v1.10.1 h1:bSU5Wu2ARdub+iv9VtoDsN8yBUI0vgflmshbeQLKhvc=
+github.com/fsouza/go-dockerclient v1.10.1/go.mod h1:dyzGriw6v3pK4O4O1u/X+vXxDDsrnLLkCqYkcLsDq2k=
+github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
+github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
 github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
 github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
 github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
 github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
 github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
-github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/errors v0.21.1 h1:rVisxQPdETctjlYntm0Ek4dKf68nAQocCloCT50vWuI=
+github.com/go-openapi/errors v0.21.1/go.mod h1:LyiY9bgc7AVVh6wtVvMYEyoj3KJYNoRw92mmvnMWgj8=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
 github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
 github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
 github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
@@ -416,8 +160,6 @@ github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8en
 github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
 github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc=
 github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
 github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
 github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
 github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8=
@@ -425,16 +167,14 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6
 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
 github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
 github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
-github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
-github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/strfmt v0.22.2 h1:DPYOrm6gexCfZZfXUaXFS4+Jw6HAaIIG0SZ5630f8yw=
+github.com/go-openapi/strfmt v0.22.2/go.mod h1:HB/b7TCm91rno75Dembc1dFW/0FPLk5CEXsoF9ReNc4=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/swag v0.22.10 h1:4y86NVn7Z2yYd6pfS4Z+Nyh3aAUL3Nul+LMbhFKy0gA=
+github.com/go-openapi/swag v0.22.10/go.mod h1:Cnn8BYtRlx6BNE3DPN86f/xkapGIcLWzh3CLEb4C1jI=
 github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
 github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -466,44 +206,18 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
 github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
 github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
 github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
 github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache
v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -517,158 +231,83 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= -github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic= +github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc= -github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/gorilla/schema v1.2.1 h1:tjDxcmdb+siIqkTNoV+qRH2mjYdr2hHe5MKXbp61ziM= +github.com/gorilla/schema v1.2.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc= -github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4= -github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod 
h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= -github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I= -github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0= -github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e h1:RLTpX495BXToqxpM90Ws4hXEo4Wfh81jr9DX1n/4WOo= +github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU= 
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -676,69 +315,42 @@ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYt github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI= -github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0= github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -746,423 +358,213 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.17.0 h1:kdnunFXpBjbzN56hcJHrXZ8M+LOkenKA7NnBzTNigTI= github.com/onsi/ginkgo/v2 v2.17.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= -github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= -github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest 
v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23 h1:CjJqzUWt07IJR6gO+Ron5qEcXomyLZLgURiSCXN6vXM= github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23/go.mod h1:UkHdGiHfjdRR/suiePnXB844WcjZ0RcfGm2mQS/V5jM= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.1.1-0.20230922153023-c0e90434df2a h1:ekgJlqTI6efJ57J7tqvIOYtdPnJRe8MxUZHbZAC021Y= -github.com/opencontainers/runtime-spec v1.1.1-0.20230922153023-c0e90434df2a/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc h1:d2hUh5O6MRBvStV55MQ8we08t42zSTqBbscoQccWmMc= github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc/go.mod h1:8tx1helyqhUC65McMm3x7HmOex8lO2/v9zPuxmKHurs= -github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= -github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= 
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722 h1:vhEmg+NeucmSYnT2j9ukkZLrR/ZOFUuUiGhxlBAlW8U= -github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722/go.mod h1:+rSifDZnwJPSW2uYHl7ePSVxq4DEu1VlhNR1uIz/Lm4= +github.com/openshift/imagebuilder v1.2.6 h1:ge+HILDVaB3c65KhH0nrM/Z1f9EdN8NUqxigd4qGqqo= +github.com/openshift/imagebuilder v1.2.6/go.mod h1:6VbTJ5CK7+OOTWcQlc/Cp86ML7pKlxOwCJNESQPbtgw= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= -github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= -github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ= github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og= github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY= github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg= -github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48= -github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sigstore/sigstore v1.8.2 h1:0Ttjcn3V0fVQXlYq7+oHaaHkGFIt3ywm7SF4JTU/l8c= +github.com/sigstore/sigstore v1.8.2/go.mod h1:CHVcSyknCcjI4K2ZhS1SI28r0tcQyBlwtALG536x1DY= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw=
-github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/sylabs/sif/v2 v2.15.1 h1:75BcunPOY11fVhe02/WHuNLTfDd3OHH0ex0MuuNMYX0=
+github.com/sylabs/sif/v2 v2.15.1/go.mod h1:YiwCUdZOhiohnPbyxuxvCZa+03HwAaiC+vfAKZPR8nQ=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
 github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
 github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
 github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
-github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
 github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
 github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
-github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA=
-github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vbauerster/mpb/v8 v8.7.2 h1:SMJtxhNho1MV3OuFgS1DAzhANN1Ejc5Ct+0iSaIkB14=
+github.com/vbauerster/mpb/v8 v8.7.2/go.mod h1:ZFnrjzspgDHoxYLGvxIruiNk73GNTPG4YHgVNpR10VY=
 github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
 github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
 github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
 github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
-github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
-github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
 github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
 github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
 github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
 github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
 github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
-go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
+go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
 go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
 go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
 go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
-go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
+go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
+go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
+go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
+go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
+go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
+go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
+go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
+go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
-golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
-golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
-golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
+golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1171,107 +573,49 @@ golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
-golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
-golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
-golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -1279,142 +623,49 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
-golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
+golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
 google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
-google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
-google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
+google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
-google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0=
+google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1423,46 +674,27 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
 google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
 google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
-gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U=
-gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
+gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs=
+gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
@@ -1474,64 +706,9 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
-gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apimachinery v0.26.5 h1:hTQVhJao2piX7vSgCn4Lwd6E0o/+TJIH4NqRf+q4EmE=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
-k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
-k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
-k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
-k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/kubernetes v1.28.4 h1:aRNxs5jb8FVTtlnxeA4FSDBVKuFwA8Gw40/U2zReBYA=
-k8s.io/kubernetes v1.28.4/go.mod h1:BTzDCKYAlu6LL9ITbfjwgwIrJ30hlTgbv0eXDoA/WoA=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
 tags.cncf.io/container-device-interface v0.6.2 h1:dThE6dtp/93ZDGhqaED2Pu374SOeUkBfuvkLuiTdwzg=
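The pdcs/container.go hunk below is the one behavioral change in this patch: podman v5 reports network counters per interface on each container stats report instead of the pre-summed NetInput/NetOutput totals that v4 exposed, so the exporter now adds them up itself. A minimal, self-contained sketch of that aggregation follows; the netCounters struct stands in for the libpod report type, whose exact name is not shown in this diff, while the RxBytes/TxBytes field names are taken directly from the hunk.

```go
package main

import "fmt"

// netCounters mirrors the per-interface counters podman v5 attaches to each
// stats report. The concrete libpod type name is an assumption here; only the
// RxBytes/TxBytes fields are confirmed by the hunk below.
type netCounters struct {
	RxBytes uint64
	TxBytes uint64
}

// sumNetwork collapses per-interface counters back into the single
// NetInput/NetOutput pair that the exporter's metrics expect.
func sumNetwork(ifaces map[string]netCounters) (netInput, netOutput uint64) {
	for _, n := range ifaces {
		netInput += n.RxBytes
		netOutput += n.TxBytes
	}

	return netInput, netOutput
}

func main() {
	in, out := sumNetwork(map[string]netCounters{
		"eth0": {RxBytes: 1024, TxBytes: 2048},
		"eth1": {RxBytes: 512, TxBytes: 256},
	})
	fmt.Println(in, out) // prints: 1536 2304
}
```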
diff --git a/pdcs/container.go b/pdcs/container.go
index b3588e4f7..4668ec917 100644
--- a/pdcs/container.go
+++ b/pdcs/container.go
@@ -5,9 +5,9 @@ import (
 	"sync"
 	"time"
 
-	"github.com/containers/podman/v4/cmd/podman/registry"
-	"github.com/containers/podman/v4/libpod/define"
-	"github.com/containers/podman/v4/pkg/domain/entities"
+	"github.com/containers/podman/v5/cmd/podman/registry"
+	"github.com/containers/podman/v5/libpod/define"
+	"github.com/containers/podman/v5/pkg/domain/entities"
 	klog "github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 )
@@ -155,6 +155,16 @@ func ContainersStats() ([]ContainerStat, error) {
 	}
 
 	for _, rep := range statReport {
+		var (
+			netInput  uint64
+			netOutput uint64
+		)
+
+		for _, net := range rep.Network {
+			netInput += net.RxBytes
+			netOutput += net.TxBytes
+		}
+
 		stat = append(stat, ContainerStat{
 			ID:          getID(rep.ContainerID),
 			Name:        rep.Name,
@@ -163,8 +173,8 @@ func ContainersStats() ([]ContainerStat, error) {
 			CPUSystem:   float64(rep.CPUSystemNano) / nano,
 			MemUsage:    rep.MemUsage,
 			MemLimit:    rep.MemLimit,
-			NetInput:    rep.NetInput,
-			NetOutput:   rep.NetOutput,
+			NetInput:    netInput,
+			NetOutput:   netOutput,
 			BlockInput:  rep.BlockInput,
 			BlockOutput: rep.BlockOutput,
 		})
diff --git a/pdcs/events.go b/pdcs/events.go
index 86a117389..ab33256e9 100644
--- a/pdcs/events.go
+++ b/pdcs/events.go
@@ -4,9 +4,9 @@ import (
 	"context"
 	"log"
 
-	"github.com/containers/podman/v4/cmd/podman/registry"
-	"github.com/containers/podman/v4/libpod/events"
-	"github.com/containers/podman/v4/pkg/domain/entities"
+	"github.com/containers/podman/v5/cmd/podman/registry"
+	"github.com/containers/podman/v5/libpod/events"
+	"github.com/containers/podman/v5/pkg/domain/entities"
 	klog "github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 )
diff --git a/pdcs/image.go b/pdcs/image.go
index c04844ab6..8cd028e37 100644
--- a/pdcs/image.go
+++ b/pdcs/image.go
@@ -5,8 +5,8 @@ import (
 	"sync"
 
 	"github.com/containers/image/v5/docker/reference"
-	"github.com/containers/podman/v4/cmd/podman/registry"
-	"github.com/containers/podman/v4/pkg/domain/entities"
+	"github.com/containers/podman/v5/cmd/podman/registry"
+	"github.com/containers/podman/v5/pkg/domain/entities"
 )
 
 var imageRep ImageReport
diff --git a/pdcs/network.go b/pdcs/network.go
index 52dfd1518..895b0d867 100644
--- a/pdcs/network.go
+++ b/pdcs/network.go
@@ -4,8 +4,8 @@ import (
 	"strings"
 
 	"github.com/containers/common/libnetwork/types"
-	"github.com/containers/podman/v4/cmd/podman/registry"
-	"github.com/containers/podman/v4/pkg/domain/entities"
+	"github.com/containers/podman/v5/cmd/podman/registry"
+	"github.com/containers/podman/v5/pkg/domain/entities"
 )
 
 // Network implements network's basic information.
diff --git a/pdcs/pod.go b/pdcs/pod.go
index 03666194c..3022b7278 100644
--- a/pdcs/pod.go
+++ b/pdcs/pod.go
@@ -1,8 +1,8 @@
 package pdcs
 
 import (
-	"github.com/containers/podman/v4/cmd/podman/registry"
-	"github.com/containers/podman/v4/pkg/domain/entities"
+	"github.com/containers/podman/v5/cmd/podman/registry"
+	"github.com/containers/podman/v5/pkg/domain/entities"
 )
 
 // Pod implements pod's basic information.
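The remaining pdcs files below receive the same mechanical treatment, a one-line module-path bump from podman/v4 to podman/v5. One convention worth keeping in mind before the test changes: metric labels carry 12-character short IDs, produced by the getID helper that container.go calls above. getID lives in pdcs/utils.go and is untouched by this patch, so the following is only an assumed equivalent, inferred from the ID[0:12] slices the e2e tests use:

```go
package main

import "fmt"

// shortID trims a full 64-character container or pod ID down to the
// 12-character prefix used in labels such as podman_container_state{id="..."}.
// The real getID in pdcs/utils.go is assumed to behave like this; its body is
// not part of this patch.
func shortID(id string) string {
	if len(id) > 12 {
		return id[:12]
	}

	return id
}

func main() {
	fmt.Println(shortID("3290ac4cfc0f3a3e1ecfc4b5a0ecee54a54f0e3d5b3d2e6c7f8a9b0c1d2e3f40"))
	// prints: 3290ac4cfc0f
}
```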
diff --git a/pdcs/registry.go b/pdcs/registry.go index 2ef2d3de2..fb726f3df 100644 --- a/pdcs/registry.go +++ b/pdcs/registry.go @@ -4,7 +4,7 @@ import ( "io" "log" - "github.com/containers/podman/v4/cmd/podman/registry" + "github.com/containers/podman/v5/cmd/podman/registry" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) diff --git a/pdcs/system.go b/pdcs/system.go index 4b05e58ac..15fee9835 100644 --- a/pdcs/system.go +++ b/pdcs/system.go @@ -3,8 +3,8 @@ package pdcs import ( "strings" - "github.com/containers/podman/v4/cmd/podman/registry" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/cmd/podman/registry" + "github.com/containers/podman/v5/libpod/define" ) // System implements podman system information. diff --git a/pdcs/utils.go b/pdcs/utils.go index 4f4d78203..92b28b9ad 100644 --- a/pdcs/utils.go +++ b/pdcs/utils.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" "github.com/pkg/errors" ) diff --git a/pdcs/volume.go b/pdcs/volume.go index feac7dd2b..d547805b8 100644 --- a/pdcs/volume.go +++ b/pdcs/volume.go @@ -3,8 +3,8 @@ package pdcs import ( "context" - "github.com/containers/podman/v4/cmd/podman/registry" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/cmd/podman/registry" + "github.com/containers/podman/v5/pkg/domain/entities" ) // Volume implements volume's basic information. diff --git a/test/e2e/container_test.go b/test/e2e/container_test.go index 65a6f07ca..306173c5c 100644 --- a/test/e2e/container_test.go +++ b/test/e2e/container_test.go @@ -5,7 +5,8 @@ import ( "fmt" "os/exec" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/prometheus-podman-exporter/test/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -27,9 +28,8 @@ var _ = Describe("Container", func() { Expect(err).To(BeNil()) var ( - cnt01Inpect []entities.ContainerInspectReport - cnt02Inpect []entities.ContainerInspectReport - cnt01Pod01Inspect entities.PodInspectReport + cnt01Inpect []entities.ContainerInspectReport + cnt02Inpect []entities.ContainerInspectReport ) cnt01InspectOutput, err := exec.Command("podman", "container", "inspect", testCnt01Name).Output() @@ -42,16 +42,14 @@ var _ = Describe("Container", func() { err = json.Unmarshal(cnt02InspectOutput, &cnt02Inpect) Expect(err).To(BeNil()) - pod01InspectOutput, err := exec.Command("podman", "pod", "inspect", testCnt01PodName).Output() - Expect(err).To(BeNil()) - err = json.Unmarshal(pod01InspectOutput, &cnt01Pod01Inspect) + cnt01Pod01Inspect, err := utils.PodInformation(testCnt01PodName) Expect(err).To(BeNil()) response := queryEndPoint() // podman_container_state expectedCnt01State := fmt.Sprintf("podman_container_state{id=\"%s\",pod_id=\"%s\",pod_name=\"%s\"} 0", - cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt02State := fmt.Sprintf("podman_container_state{id=\"%s\",pod_id=\"\",pod_name=\"\"} 0", cnt02Inpect[0].ID[0:12]) Expect(response).Should(ContainElement(ContainSubstring(expectedCnt01State))) @@ -59,7 +57,7 @@ var _ = Describe("Container", func() { // podman_container_created_seconds expectedCnt01Created := fmt.Sprintf("podman_container_created_seconds{id=\"%s\",pod_id=\"%s\",pod_name=\"%s\"}", - cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt02Created := fmt.Sprintf("podman_container_created_seconds{id=\"%s\",pod_id=\"\",pod_name=\"\"}", cnt02Inpect[0].ID[0:12]) Expect(response).Should(ContainElement(ContainSubstring(expectedCnt01Created))) @@ -67,7 +65,7 @@ var _ = Describe("Container", func() { // podman_container_exited_seconds expectedCnt01ExitedSeconds := fmt.Sprintf("podman_container_exited_seconds{id=\"%s\",pod_id=\"%s\",pod_name=\"%s\"}", - cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt02ExitedSeconds := fmt.Sprintf("podman_container_exited_seconds{id=\"%s\",pod_id=\"\",pod_name=\"\"}", cnt02Inpect[0].ID[0:12]) Expect(response).Should(ContainElement(ContainSubstring(expectedCnt01ExitedSeconds))) @@ -75,7 +73,7 @@ var _ = Describe("Container", func() { // podman_container_exit_code expectedCnt01ExitedCode := fmt.Sprintf("podman_container_exit_code{id=\"%s\",pod_id=\"%s\",pod_name=\"%s\"}", - cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt02ExitedCode := fmt.Sprintf("podman_container_exit_code{id=\"%s\",pod_id=\"\",pod_name=\"\"}", cnt02Inpect[0].ID[0:12]) Expect(response).Should(ContainElement(ContainSubstring(expectedCnt01ExitedCode))) @@ -83,7 +81,7 @@ var _ = Describe("Container", func() { // podman_container_info expectedCnt01Info := fmt.Sprintf("podman_container_info{id=\"%s\",image=\"%s\",name=\"%s\",pod_id=\"%s\",pod_name=\"%s\",ports=\"\"}", - cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt02Info 
:= fmt.Sprintf("podman_container_info{id=\"%s\",image=\"%s\",name=\"%s\",pod_id=\"\",pod_name=\"\",ports=\"\"}", cnt02Inpect[0].ID[0:12], testBusyBoxImage, testCnt02Name) @@ -92,7 +90,7 @@ var _ = Describe("Container", func() { // podman_container_rw_size_bytes expectedCnt01RwSize := fmt.Sprintf("podman_container_rw_size_bytes{id=\"%s\",pod_id=\"%s\",pod_name=\"%s\"} 0", - cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt02RwSize := fmt.Sprintf("podman_container_rw_size_bytes{id=\"%s\",pod_id=\"\",pod_name=\"\"} 0", cnt02Inpect[0].ID[0:12]) @@ -101,7 +99,7 @@ var _ = Describe("Container", func() { // podman_container_rootfs_size_bytes expectedCnt01RootFsSize := fmt.Sprintf("podman_container_rootfs_size_bytes{id=\"%s\",pod_id=\"%s\",pod_name=\"%s\"}", - cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt02RootFsSize := fmt.Sprintf("podman_container_rootfs_size_bytes{id=\"%s\",pod_id=\"\",pod_name=\"\"}", cnt02Inpect[0].ID[0:12]) diff --git a/test/e2e/image_test.go b/test/e2e/image_test.go index 5ca4a5767..f0ff91962 100644 --- a/test/e2e/image_test.go +++ b/test/e2e/image_test.go @@ -5,7 +5,7 @@ import ( "fmt" "os/exec" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/test/e2e/pod_test.go b/test/e2e/pod_test.go index 7f17008a8..1dae0f487 100644 --- a/test/e2e/pod_test.go +++ b/test/e2e/pod_test.go @@ -1,11 +1,10 @@ package e2e_test import ( - "encoding/json" "fmt" "os/exec" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/prometheus-podman-exporter/test/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -26,49 +25,40 @@ var _ = Describe("Pod", func() { _, err = exec.Command("podman", "container", "create", "--pod", testPod02Name, "--name", testPod02ContainerName, testPod02ContainerImage).Output() Expect(err).To(BeNil()) - var ( - pod01Inspect entities.PodInspectReport - pod02Inspect entities.PodInspectReport - ) - - pod01InspectOutput, err := exec.Command("podman", "pod", "inspect", testPod01Name).Output() - Expect(err).To(BeNil()) - err = json.Unmarshal(pod01InspectOutput, &pod01Inspect) + pod01Inspect, err := utils.PodInformation(testPod01Name) Expect(err).To(BeNil()) - pod02InspectOutput, err := exec.Command("podman", "pod", "inspect", testPod02Name).Output() - Expect(err).To(BeNil()) - err = json.Unmarshal(pod02InspectOutput, &pod02Inspect) + pod02Inspect, err := utils.PodInformation(testPod02Name) Expect(err).To(BeNil()) response := queryEndPoint() // podman_pod_state - expectedPod01State := fmt.Sprintf("podman_pod_state{id=\"%s\"} 0", pod01Inspect.ID[0:12]) - expectedPod02State := fmt.Sprintf("podman_pod_state{id=\"%s\"} 0", pod02Inspect.ID[0:12]) + expectedPod01State := fmt.Sprintf("podman_pod_state{id=\"%s\"} 0", pod01Inspect.ID) + expectedPod02State := fmt.Sprintf("podman_pod_state{id=\"%s\"} 0", pod02Inspect.ID) Expect(response).Should(ContainElement(ContainSubstring(expectedPod01State))) Expect(response).Should(ContainElement(ContainSubstring(expectedPod02State))) // podman_pod_created_seconds - expectedPod01Created := fmt.Sprintf("podman_pod_created_seconds{id=\"%s\"}", pod01Inspect.ID[0:12]) - expectedPod02Created := fmt.Sprintf("podman_pod_created_seconds{id=\"%s\"}", pod02Inspect.ID[0:12]) + expectedPod01Created := fmt.Sprintf("podman_pod_created_seconds{id=\"%s\"}", pod01Inspect.ID) + expectedPod02Created := fmt.Sprintf("podman_pod_created_seconds{id=\"%s\"}", pod02Inspect.ID) Expect(response).Should(ContainElement(ContainSubstring(expectedPod01Created))) Expect(response).Should(ContainElement(ContainSubstring(expectedPod02Created))) // podman_pod_info expectedPod01Info := fmt.Sprintf("podman_pod_info{id=\"%s\",infra_id=\"%s\",name=\"%s\"} 1", - pod01Inspect.ID[0:12], pod01Inspect.InfraContainerID[0:12], testPod01Name) + pod01Inspect.ID, pod01Inspect.InfraID, pod01Inspect.Name) expectedPod02Info := fmt.Sprintf("podman_pod_info{id=\"%s\",infra_id=\"%s\",name=\"%s\"} 1", - pod02Inspect.ID[0:12], pod02Inspect.InfraContainerID[0:12], testPod02Name) + pod02Inspect.ID, pod02Inspect.InfraID, pod02Inspect.Name) Expect(response).Should(ContainElement(ContainSubstring(expectedPod01Info))) Expect(response).Should(ContainElement(ContainSubstring(expectedPod02Info))) // podman_pod_containers - expectedPod01Containers := fmt.Sprintf("podman_pod_containers{id=\"%s\"} 1", pod01Inspect.ID[0:12]) - expectedPod02Containers := fmt.Sprintf("podman_pod_containers{id=\"%s\"} 2", pod02Inspect.ID[0:12]) + expectedPod01Containers := fmt.Sprintf("podman_pod_containers{id=\"%s\"} 1", pod01Inspect.ID) + expectedPod02Containers := fmt.Sprintf("podman_pod_containers{id=\"%s\"} 2", pod02Inspect.ID) Expect(response).Should(ContainElement(ContainSubstring(expectedPod01Containers))) Expect(response).Should(ContainElement(ContainSubstring(expectedPod02Containers))) diff --git a/test/e2e/volume_test.go b/test/e2e/volume_test.go index f98ef6c9a..667a867df 100644 --- a/test/e2e/volume_test.go +++ b/test/e2e/volume_test.go @@ -5,7 +5,7 @@ import ( "fmt" "os/exec" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" . 
"github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/test/e2e_em/container_test.go b/test/e2e_em/container_test.go index 1b2fa39e3..bd01ce879 100644 --- a/test/e2e_em/container_test.go +++ b/test/e2e_em/container_test.go @@ -5,7 +5,8 @@ import ( "fmt" "os/exec" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/prometheus-podman-exporter/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -16,43 +17,38 @@ var _ = Describe("Container", func() { testCnt01Name := "exp_e2e_test_cnt01" testBusyBoxImage := "quay.io/quay/busybox:latest" - var ( - cnt01Inpect []entities.ContainerInspectReport - cnt01Pod01Inspect entities.PodInspectReport - ) + var cnt01Inpect []entities.ContainerInspectReport cnt01InspectOutput, err := exec.Command("podman", "container", "inspect", testCnt01Name).Output() Expect(err).To(BeNil()) err = json.Unmarshal(cnt01InspectOutput, &cnt01Inpect) Expect(err).To(BeNil()) - pod01InspectOutput, err := exec.Command("podman", "pod", "inspect", testCnt01PodName).Output() - Expect(err).To(BeNil()) - err = json.Unmarshal(pod01InspectOutput, &cnt01Pod01Inspect) + cnt01Pod01Inspect, err := utils.PodInformation(testCnt01PodName) Expect(err).To(BeNil()) response := queryEndPoint() expectedCnt01Info := fmt.Sprintf("podman_container_info{id=\"%s\",image=\"%s\",name=\"%s\",pod_id=\"%s\",pod_name=\"%s\",ports=\"\"}", - cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt01State := fmt.Sprintf("podman_container_state{id=\"%s\",image=\"%s\",name=\"%s\",pod_id=\"%s\",pod_name=\"%s\",ports=\"\"}", - cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt01Created := fmt.Sprintf("podman_container_created_seconds{id=\"%s\",image=\"%s\",name=\"%s\",pod_id=\"%s\",pod_name=\"%s\",ports=\"\"}", - cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt01ExitedSeconds := fmt.Sprintf("podman_container_exited_seconds{id=\"%s\",image=\"%s\",name=\"%s\",pod_id=\"%s\",pod_name=\"%s\",ports=\"\"}", - cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt01ExitedCode := fmt.Sprintf("podman_container_exit_code{id=\"%s\",image=\"%s\",name=\"%s\",pod_id=\"%s\",pod_name=\"%s\",ports=\"\"}", - cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt01RwSize := fmt.Sprintf("podman_container_rw_size_bytes{id=\"%s\",image=\"%s\",name=\"%s\",pod_id=\"%s\",pod_name=\"%s\",ports=\"\"}", - cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) expectedCnt01RootFsSize := 
fmt.Sprintf("podman_container_rootfs_size_bytes{id=\"%s\",image=\"%s\",name=\"%s\",pod_id=\"%s\",pod_name=\"%s\",ports=\"\"}", - cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID[0:12], cnt01Pod01Inspect.Name) + cnt01Inpect[0].ID[0:12], testBusyBoxImage, testCnt01Name, cnt01Pod01Inspect.ID, cnt01Pod01Inspect.Name) Expect(response).Should(ContainElement(ContainSubstring(expectedCnt01Info))) Expect(response).Should(ContainElement(ContainSubstring(expectedCnt01State))) diff --git a/test/e2e_em/image_test.go b/test/e2e_em/image_test.go index e14380d42..d77690daf 100644 --- a/test/e2e_em/image_test.go +++ b/test/e2e_em/image_test.go @@ -5,7 +5,7 @@ import ( "fmt" "os/exec" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/test/e2e_em/pod_test.go b/test/e2e_em/pod_test.go index b6913d957..f21532e23 100644 --- a/test/e2e_em/pod_test.go +++ b/test/e2e_em/pod_test.go @@ -1,11 +1,9 @@ package e2e_em_test import ( - "encoding/json" "fmt" - "os/exec" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/prometheus-podman-exporter/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -14,26 +12,22 @@ var _ = Describe("Pod", func() { It("pod metrics", func() { testPod01Name := "exp_e2e_test_pod01" - var pod01Inspect entities.PodInspectReport - - pod01InspectOutput, err := exec.Command("podman", "pod", "inspect", testPod01Name).Output() - Expect(err).To(BeNil()) - err = json.Unmarshal(pod01InspectOutput, &pod01Inspect) + pod01Inspect, err := utils.PodInformation(testPod01Name) Expect(err).To(BeNil()) response := queryEndPoint() expectedPod01Info := fmt.Sprintf("podman_pod_info{id=\"%s\",infra_id=\"%s\",name=\"%s\"}", - pod01Inspect.ID[0:12], pod01Inspect.InfraContainerID[0:12], testPod01Name) + pod01Inspect.ID, pod01Inspect.InfraID, pod01Inspect.Name) expectedPod01State := fmt.Sprintf("podman_pod_state{id=\"%s\",infra_id=\"%s\",name=\"%s\"}", - pod01Inspect.ID[0:12], pod01Inspect.InfraContainerID[0:12], testPod01Name) + pod01Inspect.ID, pod01Inspect.InfraID, pod01Inspect.Name) expectedPod01Created := fmt.Sprintf("podman_pod_created_seconds{id=\"%s\",infra_id=\"%s\",name=\"%s\"}", - pod01Inspect.ID[0:12], pod01Inspect.InfraContainerID[0:12], testPod01Name) + pod01Inspect.ID, pod01Inspect.InfraID, pod01Inspect.Name) expectedPod01Containers := fmt.Sprintf("podman_pod_containers{id=\"%s\",infra_id=\"%s\",name=\"%s\"}", - pod01Inspect.ID[0:12], pod01Inspect.InfraContainerID[0:12], testPod01Name) + pod01Inspect.ID, pod01Inspect.InfraID, pod01Inspect.Name) Expect(response).Should(ContainElement(ContainSubstring(expectedPod01Info))) Expect(response).Should(ContainElement(ContainSubstring(expectedPod01State))) diff --git a/test/e2e_em/volume_test.go b/test/e2e_em/volume_test.go index 50a6cbf6a..2781fd41b 100644 --- a/test/e2e_em/volume_test.go +++ b/test/e2e_em/volume_test.go @@ -5,7 +5,7 @@ import ( "fmt" "os/exec" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) diff --git a/test/utils/utils.go b/test/utils/utils.go new file mode 100644 index 000000000..b9e47e5ce --- /dev/null +++ b/test/utils/utils.go @@ -0,0 +1,63 @@ +package utils + +import ( + "encoding/json" + "os/exec" + "strings" + + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities/types" +) + +type PodInfo struct { + ID string + InfraID string + Name string +} + +func PodInformation(name string) (*PodInfo, error) { + var podmanVersion types.SystemVersionReport + + podmanVersionReport, err := exec.Command("podman", "version", "-f", "json").Output() + if err != nil { + return nil, err + } + + err = json.Unmarshal(podmanVersionReport, &podmanVersion) + if err != nil { + return nil, err + } + + podInspectResult, err := exec.Command("podman", "pod", "inspect", name).Output() + if err != nil { + return nil, err + } + + if strings.Index(podmanVersion.Client.Version, "5") == 0 { + var podInspect []entities.PodInspectReport + + err = json.Unmarshal(podInspectResult, &podInspect) + if err != nil { + return nil, err + } + + return &PodInfo{ + Name: podInspect[0].Name, + InfraID: podInspect[0].InfraContainerID[0:12], + ID: podInspect[0].ID[0:12], + }, nil + } + + var podInspect entities.PodInspectReport + + err = json.Unmarshal(podInspectResult, &podInspect) + if err != nil { + return nil, err + } + + return &PodInfo{ + Name: podInspect.Name, + InfraID: podInspect.InfraContainerID[0:12], + ID: podInspect.ID[0:12], + }, nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml index abe77f57a..7d38a2fb9 100644 --- a/vendor/github.com/Microsoft/hcsshim/.golangci.yml +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -20,6 +20,7 @@ linters: # - typecheck # - unused + - errorlint # error wrapping (eg, not using `errors.Is`, using `%s` instead of `%w` in `fmt.Errorf`) - gofmt # whether code was gofmt-ed - govet # enabled by default, but just to be sure - nolintlint # ill-formed or insufficient nolint directives @@ -53,6 +54,12 @@ issues: text: "^ST1003: should not use underscores in package names$" source: "^package cri_containerd$" + # don't bother with propper error wrapping in test code + - path: cri-containerd + linters: + - errorlint + text: "non-wrapping format verb for fmt.Errorf" + # This repo has a LOT of generated schema files, operating system bindings, and other # things that ST1003 from stylecheck won't like (screaming case Windows api constants for example). # There's also some structs that we *could* change the initialisms to be Go friendly diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile index d8eb30b86..de6435894 100644 --- a/vendor/github.com/Microsoft/hcsshim/Makefile +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -29,12 +29,23 @@ ifeq "$(DEV_BUILD)" "1" DELTA_TARGET=out/delta-dev.tar.gz endif +ifeq "$(SNP_BUILD)" "1" +DELTA_TARGET=out/delta-snp.tar.gz +endif + # The link aliases for gcstools GCS_TOOLS=\ generichook \ install-drivers -.PHONY: all always rootfs test +# Common path prefix. +PATH_PREFIX:= +# These have PATH_PREFIX prepended to obtain the full path in recipies e.g. 
$(PATH_PREFIX)/$(VMGS_TOOL) +VMGS_TOOL:= +IGVM_TOOL:= +KERNEL_PATH:= + +.PHONY: all always rootfs test snp simple .DEFAULT_GOAL := all @@ -49,9 +60,58 @@ test: rootfs: out/rootfs.vhd -out/rootfs.vhd: out/rootfs.tar.gz bin/cmd/tar2ext4 +snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs + +simple: out/simple.vmgs snp + +%.vmgs: %.bin + rm -f $@ + # du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes + $(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc` + $(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8 + +# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk. +out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh + rm -f $@ + python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0 + +ROOTFS_DEVICE:=/dev/sda +VERITY_DEVICE:=/dev/sdb +# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.) +out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh + rm -f $@ + python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0 + +# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line. +out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh + rm -f $@ + python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0 + +# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash. +%.vhd: % bin/cmd/tar2ext4 + ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@ + +# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4. 
+%.vhd: %.ext4 bin/cmd/tar2ext4 + ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@ + +%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt + veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info + # Retrieve info required by dm-verity at boot time + # Get the blocksize of rootfs + cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest + cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt + cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize + cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize + cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks + echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors + +out/rootfs.hash.salt: + hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@ + +out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4 gzip -f -d ./out/rootfs.tar.gz - bin/cmd/tar2ext4 -vhd -i ./out/rootfs.tar -o $@ + ./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@ out/rootfs.tar.gz: out/initrd.img rm -rf rootfs-conv @@ -74,6 +134,20 @@ out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report tar -zcf $@ -C rootfs-dev . rm -rf rootfs-dev +out/delta-snp.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report boot/startup_v2056.sh boot/startup_simple.sh boot/startup.sh + rm -rf rootfs-snp + mkdir rootfs-snp + tar -xzf out/delta.tar.gz -C rootfs-snp + cp boot/startup_v2056.sh rootfs-snp/startup_v2056.sh + cp boot/startup_simple.sh rootfs-snp/startup_simple.sh + cp boot/startup.sh rootfs-snp/startup.sh + cp bin/internal/tools/snp-report rootfs-snp/bin/ + chmod a+x rootfs-snp/startup_v2056.sh + chmod a+x rootfs-snp/startup_simple.sh + chmod a+x rootfs-snp/startup.sh + tar -zcf $@ -C rootfs-snp . + rm -rf rootfs-snp + out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile @mkdir -p out rm -rf rootfs @@ -94,7 +168,10 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho tar -zcf $@ -C rootfs . rm -rf rootfs -bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report: +out/containerd-shim-runhcs-v1.exe: + GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1 + +bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd: @mkdir -p $(dir $@) GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%) @@ -108,4 +185,4 @@ bin/init: init/init.o vsockexec/vsock.o %.o: %.c @mkdir -p $(dir $@) - $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< + $(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $< \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md index 5a1361539..320438048 100644 --- a/vendor/github.com/Microsoft/hcsshim/README.md +++ b/vendor/github.com/Microsoft/hcsshim/README.md @@ -9,15 +9,18 @@ It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd ## Building While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). 
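The SNP-related Makefile rules above scrape the dm-verity parameters (root digest, salt, block sizes, block count) out of `veritysetup format` output with awk and splice them into a `dm-mod.create=` kernel argument for the IGVM tool. A sketch of that assembly, not hcsshim code, with illustrative block counts and the ROOTFS/VERITY device defaults from the Makefile:

```go
package main

import "fmt"

// dmVerityParams mirrors the values the %.hash rule above extracts from
// `veritysetup format` output.
type dmVerityParams struct {
	DataBlocks    uint64 // out/rootfs.hash.datablocks
	DataBlockSize uint64 // out/rootfs.hash.datablocksize
	HashBlockSize uint64 // out/rootfs.hash.hashblocksize
	RootDigest    string // out/rootfs.hash.rootdigest
	Salt          string // out/rootfs.hash.salt
	RootFSDevice  string // ROOTFS_DEVICE, /dev/sda in the Makefile
	VerityDevice  string // VERITY_DEVICE, /dev/sdb in the Makefile
}

// dmModCreate builds the dm-mod.create= table passed on the kernel command
// line by the out/v2056.bin and out/kernelinitrd.bin rules. The sector count
// is data blocks times block size divided by the 512-byte sector size, the
// same arithmetic the Makefile performs with $(( ... )).
func dmModCreate(p dmVerityParams) string {
	sectors := p.DataBlocks * p.DataBlockSize / 512
	return fmt.Sprintf(
		"dmverity,,,ro,0 %d verity 1 %s %s %d %d %d 0 sha256 %s %s",
		sectors, p.RootFSDevice, p.VerityDevice,
		p.DataBlockSize, p.HashBlockSize, p.DataBlocks,
		p.RootDigest, p.Salt,
	)
}

func main() {
	fmt.Println(dmModCreate(dmVerityParams{
		DataBlocks:    262144, // illustrative values only
		DataBlockSize: 4096,
		HashBlockSize: 4096,
		RootDigest:    "<root-digest>",
		Salt:          "<salt>",
		RootFSDevice:  "/dev/sda",
		VerityDevice:  "/dev/sdb",
	}))
}
```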
+ ### Linux Hyper-V Container Guest Agent To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs. + ```powershell C:\> $env:GOOS="linux" C:\> go build .\cmd\gcs\ ``` or on a Linux machine + ```sh > go build ./cmd/gcs ``` @@ -33,13 +36,15 @@ make all ``` If the build is successful, in the `./out` folder you should see: + ```sh > ls ./out/ delta.tar.gz initrd.img rootfs.tar.gz ``` ### Containerd Shim -For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md. + +For info, see the [Runtime V2 API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers. @@ -48,7 +53,9 @@ C:\> $env:GOOS="windows" C:\> go build .\cmd\containerd-shim-runhcs-v1 ``` -Then place the binary in the same directory that Containerd is located at in your environment. A default Containerd configuration file can be generated by running: +Then place the binary in the same directory that Containerd is located at in your environment. +A default Containerd configuration file can be generated by running: + ```powershell .\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii ``` @@ -56,6 +63,7 @@ Then place the binary in the same directory that Containerd is located at in you This config file will already have the shim set as the default runtime for cri interactions. To trial using the shim out with ctr.exe: + ```powershell C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!" ``` @@ -64,16 +72,69 @@ C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/window This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.microsoft.com. +the rights to use your contribution. For details, visit [Microsoft CLA](https://cla.microsoft.com). When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. -We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to -certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for -more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure -that all commits in a given PR are signed-off. +We require that contributors sign their commits using [`git commit --signoff`][git-commit-s] +to certify they either authored the work themselves or otherwise have permission to use it in this project. +A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
+ +Please see [the developer certificate](https://developercertificate.org) for more info, +as well as to make sure that you can attest to the rules listed. +Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure that all commits in a given PR are signed-off. + +### Linting + +Code must pass a linting stage, which uses [`golangci-lint`][lint]. +Since `./test` is a separate Go module, the linter is run from both the root and the +`test` directories. Additionally, the linter is run with `GOOS` set to both `windows` and +`linux`. + +The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run +automatically with VSCode by adding the following to your workspace or folder settings: + +```json + "go.lintTool": "golangci-lint", + "go.lintOnSave": "package", +``` + +Additional editor [integration options are also available][lint-ide]. + +Alternatively, `golangci-lint` can be [installed][lint-install] and run locally: + +```shell +# use . or specify a path to only lint a package +# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0" +> golangci-lint run +``` + +To run across the entire repo for both `GOOS=windows` and `linux`: + +```powershell +> foreach ( $goos in ('windows', 'linux') ) { + foreach ( $repo in ('.', 'test') ) { + pwsh -Command "cd $repo && go env -w GOOS=$goos && golangci-lint.exe run --verbose" + } +} +``` + +### Go Generate + +The pipeline checks that auto-generated code, via `go generate`, is up to date. +Similar to the [linting stage](#linting), `go generate` is run in both the root and test Go modules. + +This can be done via: + +```shell +> go generate ./... +> cd test && go generate ./... +``` ## Code of Conduct @@ -83,7 +144,7 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio ## Dependencies -This project requires Golang 1.17 or newer to build. +This project requires Golang 1.18 or newer to build. For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements). @@ -100,3 +161,10 @@ For additional details, see [Report a Computer Security Vulnerability](https://t --------------- Copyright (c) 2018 Microsoft Corp. All rights reserved. + +[lint]: https://golangci-lint.run/ +[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration +[lint-install]: https://golangci-lint.run/usage/install/#local-installation + +[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s +[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go index 54c4b3bc4..301a10888 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go @@ -38,3 +38,31 @@ func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData L } return nil } + +// AttachOverlayFilter sets up a filter of the given type on a writable container layer. Currently the only +// supported filter types are WCIFS & UnionFS (defined in internal/hcs/schema2/layer.go). +// +// `volumePath` is the volume path at which the writable layer is mounted. If the +// path does not end in a `\` the platform will append it automatically. +// +// `layerData` is the parent read-only layer data.
+func AttachOverlayFilter(ctx context.Context, volumePath string, layerData LayerData) (err error) { + title := "hcsshim::AttachOverlayFilter" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("volumePath", volumePath), + ) + + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsAttachOverlayFilter(volumePath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to attach overlay filter") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go index daf1bfff2..6e00e4a1f 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go @@ -4,7 +4,9 @@ package computestorage import ( "context" + "encoding/json" + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" "github.com/Microsoft/hcsshim/internal/oc" "github.com/pkg/errors" "go.opencensus.io/trace" @@ -26,3 +28,27 @@ func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) } return nil } + +// DetachOverlayFilter detaches the filter on a writable container layer. +// +// `volumePath` is a path to writable container volume. +func DetachOverlayFilter(ctx context.Context, volumePath string, filterType hcsschema.FileSystemFilterType) (err error) { + title := "hcsshim::DetachOverlayFilter" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("volumePath", volumePath)) + + layerData := LayerData{} + layerData.FilterType = filterType + bytes, err := json.Marshal(layerData) + if err != nil { + return err + } + + err = hcsDetachOverlayFilter(volumePath, string(bytes)) + if err != nil { + return errors.Wrap(err, "failed to detach overlay filter") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go index c38d3aa5a..5af931f2f 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go @@ -19,14 +19,17 @@ import ( //sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd? //sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? //sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? +//sys hcsAttachOverlayFilter(volumePath string, layerData string) (hr error) = computestorage.HcsAttachOverlayFilter? +//sys hcsDetachOverlayFilter(volumePath string, layerData string) (hr error) = computestorage.HcsDetachOverlayFilter? type Version = hcsschema.Version type Layer = hcsschema.Layer // LayerData is the data used to describe parent layer information. type LayerData struct { - SchemaVersion Version `json:"SchemaVersion,omitempty"` - Layers []Layer `json:"Layers,omitempty"` + SchemaVersion Version `json:"SchemaVersion,omitempty"` + Layers []Layer `json:"Layers,omitempty"` + FilterType hcsschema.FileSystemFilterType `json:"FilterType,omitempty"` } // ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. 
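The computestorage changes above route the new overlay-filter attach/detach calls through a JSON-serialized `LayerData`, whose new `FilterType` field selects WCIFS or UnionFS. A small sketch, with trimmed stand-ins for the real schema types (the actual `LayerData` also carries `SchemaVersion` and the parent `Layers`), showing the payload the wrappers hand to the computestorage.dll exports:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-in for hcsschema.FileSystemFilterType from
// internal/hcs/schema2/layer.go above.
type FileSystemFilterType string

const (
	UnionFS FileSystemFilterType = "UnionFS"
	WCIFS   FileSystemFilterType = "WCIFS"
)

// Trimmed stand-in for computestorage.LayerData with only the new field.
type LayerData struct {
	FilterType FileSystemFilterType `json:"FilterType,omitempty"`
}

// AttachOverlayFilter marshals LayerData and passes the JSON string straight
// to HcsAttachOverlayFilter; this prints the payload it would send.
func main() {
	b, err := json.Marshal(LayerData{FilterType: UnionFS})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"FilterType":"UnionFS"}
}
```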
diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go index b996b35e6..53d0beb87 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -43,8 +43,10 @@ var ( modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") + procHcsAttachOverlayFilter = modcomputestorage.NewProc("HcsAttachOverlayFilter") procHcsDestroyLayer = modcomputestorage.NewProc("HcsDestroyLayer") procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") + procHcsDetachOverlayFilter = modcomputestorage.NewProc("HcsDetachOverlayFilter") procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") @@ -83,6 +85,35 @@ func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr erro return } +func hcsAttachOverlayFilter(volumePath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsAttachOverlayFilter(_p0, _p1) +} + +func _hcsAttachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) { + hr = procHcsAttachOverlayFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsAttachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func hcsDestroyLayer(layerPath string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(layerPath) @@ -131,6 +162,35 @@ func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { return } +func hcsDetachOverlayFilter(volumePath string, layerData string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(volumePath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsDetachOverlayFilter(_p0, _p1) +} + +func _hcsDetachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) { + hr = procHcsDetachOverlayFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDetachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(layerPath) diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go index c8f09f88b..0ad7f495a 100644 --- a/vendor/github.com/Microsoft/hcsshim/container.go +++ b/vendor/github.com/Microsoft/hcsshim/container.go @@ -75,7 +75,7 @@ func init() { func CreateContainer(id string, c *ContainerConfig) (Container, error) { fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON) if err != nil { - return nil, fmt.Errorf("failed to merge 
additional JSON '%s': %s", createContainerAdditionalJSON, err) + return nil, fmt.Errorf("failed to merge additional JSON '%s': %w", createContainerAdditionalJSON, err) } system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig) diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go index 594bbfb7a..b441b0cd3 100644 --- a/vendor/github.com/Microsoft/hcsshim/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/errors.go @@ -115,6 +115,7 @@ func (e *ContainerError) Error() string { s += " encountered an error during " + e.Operation } + //nolint:errorlint // legacy code switch e.Err.(type) { case nil: break @@ -145,6 +146,7 @@ func (e *ProcessError) Error() string { s += " encountered an error during " + e.Operation } + //nolint:errorlint // legacy code switch e.Err.(type) { case nil: break @@ -166,10 +168,10 @@ func (e *ProcessError) Error() string { // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist // will currently return true when the error is ErrElementNotFound. func IsNotExist(err error) bool { - if _, ok := err.(EndpointNotFoundError); ok { + if _, ok := err.(EndpointNotFoundError); ok { //nolint:errorlint // legacy code return true } - if _, ok := err.(NetworkNotFoundError); ok { + if _, ok := err.(NetworkNotFoundError); ok { //nolint:errorlint // legacy code return true } return hcs.IsNotExist(getInnerError(err)) @@ -224,6 +226,7 @@ func IsAccessIsDenied(err error) bool { } func getInnerError(err error) error { + //nolint:errorlint // legacy code switch pe := err.(type) { case nil: return nil @@ -236,14 +239,14 @@ func getInnerError(err error) error { } func convertSystemError(err error, c *container) error { - if serr, ok := err.(*hcs.SystemError); ok { + if serr, ok := err.(*hcs.SystemError); ok { //nolint:errorlint // legacy code return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events} } return err } func convertProcessError(err error, p *process) error { - if perr, ok := err.(*hcs.ProcessError); ok { + if perr, ok := err.(*hcs.ProcessError); ok { //nolint:errorlint // legacy code return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events} } return err diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 37afbf691..8ef611d6a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -63,7 +63,7 @@ func (process *Process) SystemID() string { } func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { - switch err { + switch err { //nolint:errorlint case nil: return true, nil case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go index 176c49d49..cb8dea08d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go @@ -9,6 +9,13 @@ package hcsschema +type FileSystemFilterType string + +const ( + UnionFS FileSystemFilterType = "UnionFS" + WCIFS FileSystemFilterType = "WCIFS" +) + type Layer struct { Id string `json:"Id,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go 
b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go new file mode 100644 index 000000000..e7b605fda --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go @@ -0,0 +1,13 @@ +package hcsschema + +// NOTE: manually added + +type RegistryHive string + +// List of RegistryHive +const ( + RegistryHive_SYSTEM RegistryHive = "System" + RegistryHive_SOFTWARE RegistryHive = "Software" + RegistryHive_SECURITY RegistryHive = "Security" + RegistryHive_SAM RegistryHive = "Sam" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go index 26fde99c7..1883444a5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go @@ -10,7 +10,7 @@ package hcsschema type RegistryKey struct { - Hive string `json:"Hive,omitempty"` + Hive RegistryHive `json:"Hive,omitempty"` Name string `json:"Name,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go index 3f203176c..13f24d536 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go @@ -14,7 +14,7 @@ type RegistryValue struct { Name string `json:"Name,omitempty"` - Type_ string `json:"Type,omitempty"` + Type_ RegistryValueType `json:"Type,omitempty"` // One and only one value type must be set. StringValue string `json:"StringValue,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go new file mode 100644 index 000000000..c8b4f6c95 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go @@ -0,0 +1,17 @@ +package hcsschema + +// NOTE: manually added + +type RegistryValueType string + +// List of RegistryValueType +const ( + RegistryValueType_NONE RegistryValueType = "None" + RegistryValueType_STRING RegistryValueType = "String" + RegistryValueType_EXPANDED_STRING RegistryValueType = "ExpandedString" + RegistryValueType_MULTI_STRING RegistryValueType = "MultiString" + RegistryValueType_BINARY RegistryValueType = "Binary" + RegistryValueType_D_WORD RegistryValueType = "DWord" + RegistryValueType_Q_WORD RegistryValueType = "QWord" + RegistryValueType_CUSTOM_TYPE RegistryValueType = "CustomType" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index cf1db7da9..81d60ed43 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -97,7 +97,7 @@ func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface in events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) if err != nil { - if err == ErrTimeout { + if errors.Is(err, ErrTimeout) { // Terminate the compute system if it still exists. We're okay to // ignore a failure here. 
_ = computeSystem.Terminate(ctx) @@ -238,7 +238,7 @@ func (computeSystem *System) Shutdown(ctx context.Context) error { resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "") events := processHcsResult(ctx, resultJSON) - switch err { + switch err { //nolint:errorlint case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: default: return makeSystemError(computeSystem, operation, err, events) @@ -259,7 +259,7 @@ func (computeSystem *System) Terminate(ctx context.Context) error { resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "") events := processHcsResult(ctx, resultJSON) - switch err { + switch err { //nolint:errorlint case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending: default: return makeSystemError(computeSystem, operation, err, events) @@ -279,7 +279,7 @@ func (computeSystem *System) waitBackground() { span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - switch err { + switch err { //nolint:errorlint case nil: log.G(ctx).Debug("system exited") case ErrVmcomputeUnexpectedExit: diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go index 0a8f36d83..e61dc8de6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go @@ -31,7 +31,7 @@ func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) { func hnsCall(method, path, request string, returnResponse interface{}) error { hnsresponse, err := hnsCallRawResponse(method, path, request) if err != nil { - return fmt.Errorf("failed during hnsCallRawResponse: %v", err) + return fmt.Errorf("failed during hnsCallRawResponse: %w", err) } if !hnsresponse.Success { return fmt.Errorf("hns failed with error : %s", hnsresponse.Error) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go index 749588ad3..a64b67923 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go @@ -56,7 +56,7 @@ func issueNamespaceRequest(id *string, method, subpath string, request interface if strings.Contains(err.Error(), "Element not found.") { return nil, os.ErrNotExist } - return nil, fmt.Errorf("%s %s: %s", method, hnspath, err) + return nil, fmt.Errorf("%s %s: %w", method, hnspath, err) } return &ns, err } @@ -86,7 +86,7 @@ func GetNamespaceEndpoints(id string) ([]string, error) { var endpoint namespaceEndpointRequest err = json.Unmarshal(rsrc.Data, &endpoint) if err != nil { - return nil, fmt.Errorf("unmarshal endpoint: %s", err) + return nil, fmt.Errorf("unmarshal endpoint: %w", err) } endpoints = append(endpoints, endpoint.ID) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go index bcca84b0d..eae3cc500 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go @@ -4,6 +4,7 @@ package jobobject import ( "context" + "errors" "fmt" "sync" "unsafe" @@ -59,7 +60,7 @@ func pollIOCP(ctx context.Context, iocpHandle windows.Handle) { }).Warn("failed to parse job object message") continue } - if 
err := msq.Enqueue(notification); err == queue.ErrQueueClosed { + if err := msq.Enqueue(notification); errors.Is(err, queue.ErrQueueClosed) { // Write will only return an error when the queue is closed. // The only time a queue would ever be closed is when we call `Close` on // the job it belongs to which also removes it from the jobMap, so something diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go index 4a224fbec..10ae4d670 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -374,7 +374,7 @@ func (job *JobObject) Pids() ([]uint32, error) { return []uint32{}, nil } - if err != winapi.ERROR_MORE_DATA { + if err != winapi.ERROR_MORE_DATA { //nolint:errorlint return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go index 03f71d9a4..e3b1a1edc 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go @@ -143,6 +143,13 @@ func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { return err } info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY) + + // We really, really shouldn't be running on 32 bit, but just in case (and to satisfy CodeQL) ... + const maxUintptr = ^uintptr(0) + if affinityBitMask > uint64(maxUintptr) { + return fmt.Errorf("affinity bitmask (%d) exceeds max allowable value (%d)", affinityBitMask, maxUintptr) + } + info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) return job.setExtendedInformation(info) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go index 6d69c15b9..1ceb26bad 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go @@ -104,6 +104,7 @@ func encode(v interface{}) (_ []byte, err error) { if jErr := enc.Encode(v); jErr != nil { if err != nil { // TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...) 
+ //nolint:errorlint // non-wrapping format verb for fmt.Errorf return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr) } return nil, fmt.Errorf("json encoding: %w", jErr) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go index 3e175e522..cceb3e2d1 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go @@ -46,6 +46,7 @@ const ( ExpectedType = "expected-type" Bool = "bool" + Int32 = "int32" Uint32 = "uint32" Uint64 = "uint64" diff --git a/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go index 1ef5814d7..6d39ca3bf 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go @@ -126,7 +126,7 @@ func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { // this means that there are no more regions for the current class, try expanding if nextCls != memCls { if err := pa.split(memCls); err != nil { - if err == ErrInvalidMemoryClass { + if errors.Is(err, ErrInvalidMemoryClass) { return nil, ErrNotEnoughSpace } return nil, err @@ -147,7 +147,7 @@ func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { } // Release marks a memory region of class `memCls` and offset `offset` as free and tries to merge smaller regions into -// a bigger one +// a bigger one. func (pa *PoolAllocator) Release(reg MappedRegion) error { mp := pa.pools[reg.Type()] if mp == nil { @@ -164,7 +164,7 @@ func (pa *PoolAllocator) Release(reg MappedRegion) error { return ErrNotAllocated } if err := pa.merge(n.parent); err != nil { - if err != ErrEarlyMerge { + if !errors.Is(err, ErrEarlyMerge) { return err } } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go index 71df25b8d..8c41a3661 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go @@ -6,7 +6,7 @@ import ( "net" "os" - "github.com/containerd/containerd/errdefs" + "github.com/containerd/errdefs" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -16,7 +16,7 @@ import ( func toStatusCode(err error) codes.Code { // checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status, - // wraps an error defined in "github.com/containerd/containerd/errdefs", or is a + // wraps an error defined in "github.com/containerd/errdefs", or is a // context timeout or cancelled error if s, ok := status.FromError(errdefs.ToGRPC(err)); ok { return s.Code() diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go index 70368533b..b087b9879 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -243,7 +243,7 @@ func RemoveRelative(path string, root *os.File) error { if err == nil { defer f.Close() err = deleteOnClose(f) - if err == syscall.ERROR_ACCESS_DENIED { + if err == syscall.ERROR_ACCESS_DENIED { //nolint:errorlint // Maybe the file is marked readonly. Clear the bit and retry. 
_ = clearReadOnly(f) err = deleteOnClose(f) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go index 79b14ef97..67ca897cf 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -104,7 +104,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error }() select { case <-ctx.Done(): - if ctx.Err() == gcontext.DeadlineExceeded { + if ctx.Err() == gcontext.DeadlineExceeded { //nolint:errorlint log.G(ctx).WithField(logfields.Timeout, trueTimeout). Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. " + "If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " + @@ -150,7 +150,7 @@ func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration strin if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -205,7 +205,7 @@ func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -228,7 +228,7 @@ func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, opt if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -251,7 +251,7 @@ func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, op if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -274,7 +274,7 @@ func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -297,7 +297,7 @@ func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, optio if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() @@ -621,7 +621,7 @@ func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options if result != "" { span.AddAttributes(trace.StringAttribute("result", result)) } - if hr != errVmcomputeOperationPending { + if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned oc.SetSpanStatus(span, hr) } }() diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go index 792f13f59..807b7de1f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -64,7 +66,7 @@ func (r *baseLayerReader) walkUntilCancelled() error { return nil }) - if err == errorIterationCanceled { + if err == errorIterationCanceled { //nolint:errorlint // explicitly returned return nil } @@ -103,7 +105,7 @@ func (r *baseLayerReader) walkUntilCancelled() error { return nil }) - if err == errorIterationCanceled { + if err == errorIterationCanceled { //nolint:errorlint // explicitly returned return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go index c542f556c..d25c3c520 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go index e2ec27ad0..35fcbedb3 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -11,7 +11,6 @@ import ( "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/oc" - "github.com/Microsoft/hcsshim/osversion" "go.opencensus.io/trace" ) @@ -30,14 +29,17 @@ func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error return hcserror.New(err, title, "") } - // Manually expand the volume now in order to work around bugs in 19H1 and - // prerelease versions of Vb. Remove once this is fixed in Windows. - if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 { - err = expandSandboxVolume(ctx, path) - if err != nil { - return err - } + // Always expand the volume too. In case of legacy layers not expanding the volume here works because + // the PrepareLayer call internally handles the expansion. However, in other cases (like CimFS) we + // don't call PrepareLayer and so the volume will never be expanded. This also means in case of + // legacy layers, we might have a small perf hit because the VHD is mounted twice for expansion (once + // here and once during the PrepareLayer call). But as long as the perf hit is minimal, we should be + // okay. + err = expandSandboxVolume(ctx, path) + if err != nil { + return err } + return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index 807d83310..fc12eeba4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -154,7 +154,7 @@ func (r *legacyLayerReader) walkUntilCancelled() error { } return nil }) - if err == errorIterationCanceled { + if err == errorIterationCanceled { //nolint:errorlint // explicitly returned return nil } if err == nil { @@ -196,7 +196,7 @@ func findBackupStreamSize(r io.Reader) (int64, error) { for { hdr, err := br.Next() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { err = nil } return 0, err @@ -428,7 +428,7 @@ func (w *legacyLayerWriter) initUtilityVM() error { // immutable. 
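Several vendored files here gain a `//go:build windows` constraint so that builds for other platforms skip them entirely instead of tripping over Windows-only APIs. A minimal sketch of the mechanism under hypothetical file names; buildah's own add_common.go/add_linux.go pair later in this patch does the same thing for Linux:

```go
// pids_windows.go — compiled only when GOOS=windows.
//go:build windows

package jobs

// Pids would call the Windows-only job-object APIs here.
func Pids() ([]uint32, error) { return nil, nil }
```

```go
// pids_other.go — the stub every other GOOS builds instead.
//go:build !windows

package jobs

import "errors"

func Pids() ([]uint32, error) {
	return nil, errors.New("job objects are only supported on windows")
}
```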
err = cloneTree(w.parentRoots[0], w.destRoot, UtilityVMFilesPath, mutatedUtilityVMFiles) if err != nil { - return fmt.Errorf("cloning the parent utility VM image failed: %s", err) + return fmt.Errorf("cloning the parent utility VM image failed: %w", err) } w.HasUtilityVM = true } @@ -451,7 +451,7 @@ func (w *legacyLayerWriter) reset() error { for { bhdr, err := br.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { // end of backupstream data break } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go index d04bffc1f..21664577b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go @@ -1,3 +1,5 @@ +//go:build windows + package winapi import ( @@ -34,7 +36,7 @@ type CimFsFileMetadata struct { //sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage? //sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage? -//sys CimCloseImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCloseImage? +//sys CimCloseImage(cimFSHandle FsHandle) = cimfs.CimCloseImage? //sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage? //sys CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateFile? diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index b5b9fcccc..ffd3cd7ff 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -184,18 +184,12 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr return } -func CimCloseImage(cimFSHandle FsHandle) (hr error) { - hr = procCimCloseImage.Find() - if hr != nil { +func CimCloseImage(cimFSHandle FsHandle) (err error) { + err = procCimCloseImage.Find() + if err != nil { return } - r0, _, _ := syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } + syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) return } diff --git a/vendor/github.com/containerd/errdefs/LICENSE b/vendor/github.com/containerd/errdefs/LICENSE new file mode 100644 index 000000000..584149b6e --- /dev/null +++ b/vendor/github.com/containerd/errdefs/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/errdefs/README.md b/vendor/github.com/containerd/errdefs/README.md new file mode 100644 index 000000000..bd418c63f --- /dev/null +++ b/vendor/github.com/containerd/errdefs/README.md @@ -0,0 +1,13 @@ +# errdefs + +A Go package for defining and checking common containerd errors. + +## Project details + +**errdefs** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. 
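The newly vendored errdefs package (already used above by hcsshim's oc package in place of the old containerd/containerd/errdefs path) classifies failures with wrappable sentinels. A short usage sketch; the function and image name are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/containerd/errdefs"
)

// findImage shows the intended pattern: wrap one of the errdefs sentinels
// with %w so callers can branch on the error class rather than its text.
func findImage(name string) error {
	return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound)
}

func main() {
	err := findImage("example.test/missing:latest")
	if errdefs.IsNotFound(err) {
		fmt.Println("treat as a cache miss:", err)
	}
}
```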
diff --git a/vendor/github.com/containerd/errdefs/errors.go b/vendor/github.com/containerd/errdefs/errors.go new file mode 100644 index 000000000..876225597 --- /dev/null +++ b/vendor/github.com/containerd/errdefs/errors.go @@ -0,0 +1,92 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errdefs defines the common errors used throughout containerd +// packages. +// +// Use with fmt.Errorf to add context to an error. +// +// To detect an error class, use the IsXXX functions to tell whether an error +// is of a certain type. +// +// The functions ToGRPC and FromGRPC can be used to map server-side and +// client-side errors to the correct types. +package errdefs + +import ( + "context" + "errors" +) + +// Definitions of common error types used throughout containerd. All containerd +// errors returned by most packages will map into one of these errors classes. +// Packages should return errors of these types when they want to instruct a +// client to take a particular action. +// +// For the most part, we just try to provide local grpc errors. Most conditions +// map very well to those defined by grpc. +var ( + ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. + ErrInvalidArgument = errors.New("invalid argument") + ErrNotFound = errors.New("not found") + ErrAlreadyExists = errors.New("already exists") + ErrFailedPrecondition = errors.New("failed precondition") + ErrUnavailable = errors.New("unavailable") + ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented +) + +// IsInvalidArgument returns true if the error is due to an invalid argument +func IsInvalidArgument(err error) bool { + return errors.Is(err, ErrInvalidArgument) +} + +// IsNotFound returns true if the error is due to a missing object +func IsNotFound(err error) bool { + return errors.Is(err, ErrNotFound) +} + +// IsAlreadyExists returns true if the error is due to an already existing +// metadata item +func IsAlreadyExists(err error) bool { + return errors.Is(err, ErrAlreadyExists) +} + +// IsFailedPrecondition returns true if an operation could not proceed to the +// lack of a particular condition +func IsFailedPrecondition(err error) bool { + return errors.Is(err, ErrFailedPrecondition) +} + +// IsUnavailable returns true if the error is due to a resource being unavailable +func IsUnavailable(err error) bool { + return errors.Is(err, ErrUnavailable) +} + +// IsNotImplemented returns true if the error is due to not being implemented +func IsNotImplemented(err error) bool { + return errors.Is(err, ErrNotImplemented) +} + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Is(err, context.Canceled) +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. 
+func IsDeadlineExceeded(err error) bool { + return errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/containerd/errdefs/grpc.go b/vendor/github.com/containerd/errdefs/grpc.go new file mode 100644 index 000000000..7a9b33e05 --- /dev/null +++ b/vendor/github.com/containerd/errdefs/grpc.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "fmt" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack. +func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. 
+// +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = fmt.Errorf("%s: %w", msg, cls) + } else { + err = cls + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. +func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index ac12d66b2..c48fc4bc3 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -27,12 +27,12 @@ env: #### # GCE project where images live IMAGE_PROJECT: "libpod-218412" - FEDORA_NAME: "fedora-39β" + FEDORA_NAME: "fedora-39" PRIOR_FEDORA_NAME: "fedora-38" DEBIAN_NAME: "debian-13" # Image identifiers - IMAGE_SUFFIX: "c20231004t194547z-f39f38d13" + IMAGE_SUFFIX: "c20240222t143004z-f39f38d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -221,6 +221,7 @@ integration_task: DISTRO_NV: "${DEBIAN_NAME}" IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'vfs' + CI_DESIRED_RUNTIME: runc # OVERLAY - env: DISTRO_NV: "${FEDORA_NAME}" @@ -234,6 +235,7 @@ integration_task: DISTRO_NV: "${DEBIAN_NAME}" IMAGE_NAME: "${DEBIAN_CACHE_IMAGE_NAME}" STORAGE_DRIVER: 'overlay' + CI_DESIRED_RUNTIME: runc gce_instance: image_name: "$IMAGE_NAME" @@ -323,6 +325,8 @@ in_podman_task: # set of tasks all passed, and allows confirming that based on the status # of this task. success_task: + # N/B: The prow merge-bot (tide) is sensitized to this exact name, DO NOT CHANGE IT. 
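Back in the errdefs grpc.go hunk above, ToGRPC and FromGRPC are designed to round-trip an error class across a gRPC boundary, with rebaseMessage stripping the duplicated class suffix so the message does not grow on each hop. A sketch of the round trip using only the vendored API shown in that hunk:

```go
package main

import (
	"fmt"

	"github.com/containerd/errdefs"
)

func main() {
	// Server side: a classified error goes out as codes.NotFound.
	err := fmt.Errorf("image %q: %w", "example.test/missing", errdefs.ErrNotFound)
	wire := errdefs.ToGRPC(err)

	// Client side: the status is rebased onto the same sentinel, so
	// class checks keep working after crossing the wire.
	back := errdefs.FromGRPC(wire)
	fmt.Println(errdefs.IsNotFound(back)) // true
	fmt.Println(back)                     // image "example.test/missing": not found
}
```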
+ # Ref: https://github.com/openshift/release/pull/48909 name: "Total Success" alias: success diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 495812857..5c4101610 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,17 +2,99 @@ # Changelog -## v1.33.5 (2024-02-01) - - Bump c/common to v0.57.4, moby/buildkit v0.5.12 - -## v1.33.4 (2024-01-30) - - Bump c/image to v5.29.2 and c/common to v0.57.3 - -## v1.33.3 (2024-01-18) - - Bump c/common to 0.57.2 and c/image to 5.29.1 +## v1.35.1 (2024-03-18) + + [release-1.35] CVE-2024-1753 container escape fix + +## v1.35.0 (2024-03-06) + + fix(deps): update module github.com/stretchr/testify to v1.9.0 + cgroups: reuse version check from c/common + Update vendor of containers/(common,image) + fix(deps): update github.com/containers/storage digest to eadc620 + fix(deps): update github.com/containers/luksy digest to ceb12d4 + fix(deps): update github.com/containers/image/v5 digest to cdc6802 + manifest add: complain if we get artifact flags without --artifact + Use retry logic from containers/common + Vendor in containers/(storage,image,common) + Update module golang.org/x/crypto to v0.20.0 + Add comment re: Total Success task name + tests: skip_if_no_unshare(): check for --setuid + Properly handle build --pull=false + [skip-ci] Update tim-actions/get-pr-commits action to v1.3.1 + Update module go.etcd.io/bbolt to v1.3.9 + Revert "Reduce official image size" + Update module github.com/opencontainers/image-spec to v1.1.0 + Reduce official image size + Build with CNI support on FreeBSD + build --all-platforms: skip some base "image" platforms + Bump main to v1.35.0-dev + Vendor in latest containers/(storage,image,common) + Split up error messages for missing --sbom related flags + `buildah manifest`: add artifact-related options + cmd/buildah/manifest.go: lock lists before adding/annotating/pushing + cmd/buildah/manifest.go: don't make struct declarations aliases + Use golang.org/x/exp/slices.Contains + Disable loong64 again + Fix a couple of typos in one-line comments + egrep is obsolescent; use grep -E + Try Cirrus with a newer VM version + Set CONTAINERS_CONF in the chroot-mount-flags integration test + Update to match dependency API update + Update github.com/openshift/imagebuilder and containers/common + docs: correct default authfile path + fix(deps): update module github.com/containerd/containerd to v1.7.13 + tests: retrofit test for heredoc summary + build, heredoc: show heredoc summary in build output + manifest, push: add support for --retry and --retry-delay + fix(deps): update github.com/openshift/imagebuilder digest to b767bc3 + imagebuildah: fix crash with empty RUN + fix(deps): update github.com/containers/luksy digest to b62d551 + fix(deps): update module github.com/opencontainers/runc to v1.1.12 [security] + fix(deps): update module github.com/moby/buildkit to v0.12.5 [security] + Make buildah match podman for handling of ulimits + docs: move footnotes to where they're applicable + Allow users to specify no-dereference + Run codespell on code + Fix FreeBSD version parsing + Fix a build break on FreeBSD + Remove a bad FROM line + fix(deps): update module github.com/onsi/gomega to v1.31.1 + fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc6 + docs: use reversed logo for dark theme in README + build,commit: add --sbom to scan and produce SBOMs when committing + commit: force 
omitHistory if the parent has layers but no history + docs: fix a couple of typos + internal/mkcw.Archive(): handle extra image content + stage_executor,heredoc: honor interpreter in heredoc + stage_executor,layers: burst cache if heredoc content is changed + fix(deps): update module golang.org/x/crypto to v0.18.0 + Replace map[K]bool with map[K]struct{} where it makes sense + fix(deps): update module golang.org/x/sync to v0.6.0 + fix(deps): update module golang.org/x/term to v0.16.0 + Bump CI VMs + Replace strings.SplitN with strings.Cut + fix(deps): update github.com/containers/storage digest to ef81e9b + fix(deps): update github.com/containers/image/v5 digest to 1b221d4 + fix(deps): update module github.com/fsouza/go-dockerclient to v1.10.1 + Document use of containers-transports values in buildah + fix(deps): update module golang.org/x/crypto to v0.17.0 [security] + chore(deps): update dependency containers/automation_images to v20231208 + manifest: addCompression use default from containers.conf + commit: add a --add-file flag + mkcw: populate the rootfs using an overlay + chore(deps): update dependency containers/automation_images to v20230517 + [skip-ci] Update actions/stale action to v9 + fix(deps): update module github.com/containernetworking/plugins to v1.4.0 + fix(deps): update github.com/containers/image/v5 digest to 7a40fee + Bump to v1.34.1-dev + Ignore errors if label.Relabel returns ENOSUP + +## v1.34.0 (2023-12-11) + + vendor: update c/{common,image,storage} + run: Allow using just one jail per container on FreeBSD + Remove makefile targets entrypoint{,.gz} for non x86_64 ## v1.33.2 (2023-11-22) diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index 85b43c7b9..2e801fa2f 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -4,12 +4,18 @@ APPARMORTAG := $(shell hack/apparmor_tag.sh) STORAGETAGS := exclude_graphdriver_devicemapper $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh) SECURITYTAGS ?= seccomp $(APPARMORTAG) TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) +ifeq ($(shell uname -s),FreeBSD) +# FreeBSD needs CNI until netavark is supported +TAGS += cni +endif BUILDTAGS += $(TAGS) PREFIX := /usr/local BINDIR := $(PREFIX)/bin BASHINSTALLDIR = $(PREFIX)/share/bash-completion/completions BUILDFLAGS := -tags "$(BUILDTAGS)" BUILDAH := buildah +SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z) +SELINUXTYPE=container_runtime_exec_t GO := go GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi) @@ -39,7 +45,7 @@ LIBSECCOMP_COMMIT := release-2.3 EXTRA_LDFLAGS ?= BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' -SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go +SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/config/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/sbom/*.go 
internal/source/*.go internal/tmpdir/*.go internal/util/*.go internal/volumes/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go LINTFLAGS ?= @@ -69,27 +75,26 @@ static: mkdir -p ./bin cp -rfp ./result/bin/* ./bin/ -bin/buildah: $(SOURCES) cmd/buildah/*.go internal/mkcw/embed/entrypoint.gz +bin/buildah: $(SOURCES) cmd/buildah/*.go internal/mkcw/embed/entrypoint_amd64.gz $(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah + test -z "${SELINUXOPT}" || chcon --verbose -t $(SELINUXTYPE) $@ ifneq ($(shell as --version | grep x86_64),) -internal/mkcw/embed/entrypoint: internal/mkcw/embed/entrypoint.s +internal/mkcw/embed/entrypoint_amd64.gz: internal/mkcw/embed/entrypoint_amd64 + gzip -k9nf $^ + +internal/mkcw/embed/entrypoint_amd64: internal/mkcw/embed/entrypoint_amd64.s $(AS) -o $(patsubst %.s,%.o,$^) $^ $(LD) -o $@ $(patsubst %.s,%.o,$^) strip $@ -else -.PHONY: internal/mkcw/embed/entrypoint endif -internal/mkcw/embed/entrypoint.gz: internal/mkcw/embed/entrypoint - $(RM) $@ - gzip -k $^ .PHONY: buildah buildah: bin/buildah ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list))) -LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS)) +LINUX_CROSS_TARGETS := $(filter-out %.loong64,$(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS))) DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS)) WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS))) FREEBSD_CROSS_TARGETS := $(filter bin/buildah.freebsd.%,$(ALL_CROSS_TARGETS)) @@ -125,7 +130,7 @@ gopath: test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd) codespell: - codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L passt,bu,uint,iff,od,erro -w + codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L secon,passt,bu,uint,iff,od,erro -w .PHONY: validate validate: install.tools @@ -198,7 +203,7 @@ test-unit: tests/testreport/testreport $(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf vendor-in-container: - podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.20 make vendor + podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.21 make vendor .PHONY: vendor vendor: diff --git a/vendor/github.com/containers/buildah/README.md b/vendor/github.com/containers/buildah/README.md index ded4870b5..0e6a1553c 100644 --- a/vendor/github.com/containers/buildah/README.md +++ b/vendor/github.com/containers/buildah/README.md @@ -1,4 +1,5 @@ -![buildah logo](https://cdn.rawgit.com/containers/buildah/main/logos/buildah-logo_large.png) +![buildah logo (light)](logos/buildah-logo_large.png#gh-light-mode-only) +![buildah logo (dark)](logos/buildah-logo_reverse_large.png#gh-dark-mode-only) # [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container 
Initiative (OCI)](https://www.opencontainers.org/) container images diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go index c61de5a49..da18429f6 100644 --- a/vendor/github.com/containers/buildah/add.go +++ b/vendor/github.com/containers/buildah/add.go @@ -23,7 +23,6 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/runc/libcontainer/userns" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -438,7 +437,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption ChmodDirs: nil, ChownFiles: nil, ChmodFiles: nil, - IgnoreDevices: userns.RunningInUserNS(), + IgnoreDevices: runningInUserNS(), } putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher)) } @@ -579,7 +578,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption ChmodDirs: nil, ChownFiles: nil, ChmodFiles: nil, - IgnoreDevices: userns.RunningInUserNS(), + IgnoreDevices: runningInUserNS(), } putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher)) } diff --git a/vendor/github.com/containers/buildah/add_common.go b/vendor/github.com/containers/buildah/add_common.go new file mode 100644 index 000000000..b1eef2c19 --- /dev/null +++ b/vendor/github.com/containers/buildah/add_common.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package buildah + +func runningInUserNS() bool { + return false +} diff --git a/vendor/github.com/containers/buildah/add_linux.go b/vendor/github.com/containers/buildah/add_linux.go new file mode 100644 index 000000000..78b742496 --- /dev/null +++ b/vendor/github.com/containers/buildah/add_linux.go @@ -0,0 +1,9 @@ +package buildah + +import ( + "github.com/opencontainers/runc/libcontainer/userns" +) + +func runningInUserNS() bool { + return userns.RunningInUserNS() +} diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go index 213b1f64d..0524eca83 100644 --- a/vendor/github.com/containers/buildah/bind/mount.go +++ b/vendor/github.com/containers/buildah/bind/mount.go @@ -11,11 +11,11 @@ import ( "syscall" "github.com/containers/buildah/util" - cutil "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) @@ -192,11 +192,11 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou // Decide if the mount should not be redirected to an intermediate location first. func leaveBindMountAlone(mount specs.Mount) bool { // If we know we shouldn't do a redirection for this mount, skip it. - if cutil.StringInSlice(NoBindOption, mount.Options) { + if slices.Contains(mount.Options, NoBindOption) { return true } // If we're not bind mounting it in, we don't need to do anything for it. 
- if mount.Type != "bind" && !cutil.StringInSlice("bind", mount.Options) && !cutil.StringInSlice("rbind", mount.Options) { + if mount.Type != "bind" && !slices.Contains(mount.Options, "bind") && !slices.Contains(mount.Options, "rbind") { return true } return false @@ -294,7 +294,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error { } } // if we're also supposed to remove this thing, do that, too - if cutil.StringInSlice(mount.Mountpoint, mountpointsToRemove) { + if slices.Contains(mountpointsToRemove, mount.Mountpoint) { if err := os.Remove(mount.Mountpoint); err != nil { return fmt.Errorf("removing %q: %w", mount.Mountpoint, err) } diff --git a/vendor/github.com/containers/buildah/bind/util.go b/vendor/github.com/containers/buildah/bind/util.go index 3f77f3e51..c7c67b0aa 100644 --- a/vendor/github.com/containers/buildah/bind/util.go +++ b/vendor/github.com/containers/buildah/bind/util.go @@ -1,8 +1,8 @@ package bind import ( - "github.com/containers/common/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/exp/slices" ) const ( @@ -14,7 +14,7 @@ const ( func stripNoBindOption(spec *specs.Spec) { for i := range spec.Mounts { - if util.StringInSlice(NoBindOption, spec.Mounts[i].Options) { + if slices.Contains(spec.Mounts[i].Options, NoBindOption) { prunedOptions := make([]string, 0, len(spec.Mounts[i].Options)) for _, option := range spec.Mounts[i].Options { if option != NoBindOption { diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index e4ed5dcd5..c5c417aae 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -391,6 +391,10 @@ type ImportFromImageOptions struct { // instead of the usual rootfs contents. type ConfidentialWorkloadOptions = define.ConfidentialWorkloadOptions +// SBOMScanOptions encapsulates options which control whether or not we run a +// scanner on the rootfs that we're about to commit, and how. +type SBOMScanOptions = define.SBOMScanOptions + // NewBuilder creates a new build container. 
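Buildah's move from containers/common's util.StringInSlice to golang.org/x/exp/slices.Contains, visible in the bind hunks above, is mechanical apart from the swapped argument order. A tiny sketch of the equivalence:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	options := []string{"rbind", "ro", "nosuid"}

	// Old: util.StringInSlice("ro", options) — needle first.
	// New: slices.Contains(options, "ro")   — slice first.
	fmt.Println(slices.Contains(options, "ro"))   // true
	fmt.Println(slices.Contains(options, "bind")) // false
}
```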
func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) { if options.CommonBuildOpts == nil { diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 96e4dbb18..8c3b7a791 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,14 +1,93 @@ --Changelog for v1.33.5 (2024-02-01) - - * Bump c/common to v0.57.4, moby/buildkit v0.5.12 - --Changelog for v1.33.4 (2024-01-30) - - * Bump c/image to v5.29.2 and c/common to v0.57.3 - --Changelog for v1.33.3 (2024-01-18) - - * Bump c/common to 0.57.2 and c/image to 5.29.1 +- Changelog for v1.35.1 (2024-03-18) + * [release-1.35] CVE-2024-1753 container escape fix + +- Changelog for v1.35.0 (2024-03-06) + * fix(deps): update module github.com/stretchr/testify to v1.9.0 + * cgroups: reuse version check from c/common + * Update vendor of containers/(common,image) + * fix(deps): update github.com/containers/storage digest to eadc620 + * fix(deps): update github.com/containers/luksy digest to ceb12d4 + * fix(deps): update github.com/containers/image/v5 digest to cdc6802 + * manifest add: complain if we get artifact flags without --artifact + * Use retry logic from containers/common + * Vendor in containers/(storage,image,common) + * Update module golang.org/x/crypto to v0.20.0 + * Add comment re: Total Success task name + * tests: skip_if_no_unshare(): check for --setuid + * Properly handle build --pull=false + * [skip-ci] Update tim-actions/get-pr-commits action to v1.3.1 + * Update module go.etcd.io/bbolt to v1.3.9 + * Revert "Reduce official image size" + * Update module github.com/opencontainers/image-spec to v1.1.0 + * Reduce official image size + * Build with CNI support on FreeBSD + * build --all-platforms: skip some base "image" platforms + * Bump main to v1.35.0-dev + * Vendor in latest containers/(storage,image,common) + * Split up error messages for missing --sbom related flags + * `buildah manifest`: add artifact-related options + * cmd/buildah/manifest.go: lock lists before adding/annotating/pushing + * cmd/buildah/manifest.go: don't make struct declarations aliases + * Use golang.org/x/exp/slices.Contains + * Disable loong64 again + * Fix a couple of typos in one-line comments + * egrep is obsolescent; use grep -E + * Try Cirrus with a newer VM version + * Set CONTAINERS_CONF in the chroot-mount-flags integration test + * Update to match dependency API update + * Update github.com/openshift/imagebuilder and containers/common + * docs: correct default authfile path + * fix(deps): update module github.com/containerd/containerd to v1.7.13 + * tests: retrofit test for heredoc summary + * build, heredoc: show heredoc summary in build output + * manifest, push: add support for --retry and --retry-delay + * fix(deps): update github.com/openshift/imagebuilder digest to b767bc3 + * imagebuildah: fix crash with empty RUN + * fix(deps): update github.com/containers/luksy digest to b62d551 + * fix(deps): update module github.com/opencontainers/runc to v1.1.12 [security] + * fix(deps): update module github.com/moby/buildkit to v0.12.5 [security] + * Make buildah match podman for handling of ulimits + * docs: move footnotes to where they're applicable + * Allow users to specify no-dereference + * Run codespell on code + * Fix FreeBSD version parsing + * Fix a build break on FreeBSD + * Remove a bad FROM line + * fix(deps): update module github.com/onsi/gomega to v1.31.1 + * 
fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc6 + * docs: use reversed logo for dark theme in README + * build,commit: add --sbom to scan and produce SBOMs when committing + * commit: force omitHistory if the parent has layers but no history + * docs: fix a couple of typos + * internal/mkcw.Archive(): handle extra image content + * stage_executor,heredoc: honor interpreter in heredoc + * stage_executor,layers: burst cache if heredoc content is changed + * fix(deps): update module golang.org/x/crypto to v0.18.0 + * Replace map[K]bool with map[K]struct{} where it makes sense + * fix(deps): update module golang.org/x/sync to v0.6.0 + * fix(deps): update module golang.org/x/term to v0.16.0 + * Bump CI VMs + * Replace strings.SplitN with strings.Cut + * fix(deps): update github.com/containers/storage digest to ef81e9b + * fix(deps): update github.com/containers/image/v5 digest to 1b221d4 + * fix(deps): update module github.com/fsouza/go-dockerclient to v1.10.1 + * Document use of containers-transports values in buildah + * fix(deps): update module golang.org/x/crypto to v0.17.0 [security] + * chore(deps): update dependency containers/automation_images to v20231208 + * manifest: addCompression use default from containers.conf + * commit: add a --add-file flag + * mkcw: populate the rootfs using an overlay + * chore(deps): update dependency containers/automation_images to v20230517 + * [skip-ci] Update actions/stale action to v9 + * fix(deps): update module github.com/containernetworking/plugins to v1.4.0 + * fix(deps): update github.com/containers/image/v5 digest to 7a40fee + * Bump to v1.34.1-dev + * Ignore errors if label.Relabel returns ENOSUP + +- Changelog for v1.34.0 (2023-12-11) + * vendor: update c/{common,image,storage} + * run: Allow using just one jail per container on FreeBSD + * Remove makefile targets entrypoint{,.gz} for non x86_64 - Changelog for v1.33.2 (2023-11-22) * Update minimum to golang 1.20 diff --git a/vendor/github.com/containers/buildah/chroot/run_common.go b/vendor/github.com/containers/buildah/chroot/run_common.go index deda64fc2..4ffe1382d 100644 --- a/vendor/github.com/containers/buildah/chroot/run_common.go +++ b/vendor/github.com/containers/buildah/chroot/run_common.go @@ -34,6 +34,8 @@ const ( runUsingChrootCommand = "buildah-chroot-runtime" // runUsingChrootExec is a command we use as a key for reexec runUsingChrootExecCommand = "buildah-chroot-exec" + // containersConfEnv is an environment variable that we need to pass down except for the command itself + containersConfEnv = "CONTAINERS_CONF" ) func init() { @@ -128,6 +130,9 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr cmd.Dir = "/" cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())} + if _, ok := os.LookupEnv(containersConfEnv); ok { + cmd.Env = append(cmd.Env, containersConfEnv+"="+os.Getenv(containersConfEnv)) + } interrupted := make(chan os.Signal, 100) cmd.Hook = func(int) error { @@ -511,6 +516,9 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io cmd.Stdin, cmd.Stdout, cmd.Stderr = stdin, stdout, stderr cmd.Dir = "/" cmd.Env = []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())} + if _, ok := os.LookupEnv(containersConfEnv); ok { + cmd.Env = append(cmd.Env, containersConfEnv+"="+os.Getenv(containersConfEnv)) + } if ctty != nil { cmd.Setsid = true cmd.Ctty = ctty diff --git a/vendor/github.com/containers/buildah/commit.go 
b/vendor/github.com/containers/buildah/commit.go index ef55e5419..14795eea2 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -118,6 +118,15 @@ type CommitOptions struct { // to the configuration of the image that is being committed, after // OverrideConfig is applied. OverrideChanges []string + // ExtraImageContent is a map which describes additional content to add + // to the committed image. The map's keys are filesystem paths in the + // image and the corresponding values are the paths of files whose + // contents will be used in their place. The contents will be owned by + // 0:0 and have mode 0644. Currently only accepts regular files. + ExtraImageContent map[string]string + // SBOMScanOptions encapsulates options which control whether or not we + // run scanners on the rootfs that we're about to commit, and how. + SBOMScanOptions []SBOMScanOptions } var ( @@ -315,6 +324,28 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options } logrus.Debugf("committing image with reference %q is allowed by policy", transports.ImageName(dest)) + // If we need to scan the rootfs, do it now. + options.ExtraImageContent = copyStringStringMap(options.ExtraImageContent) + var extraImageContent, extraLocalContent map[string]string + if len(options.SBOMScanOptions) != 0 { + var scansDirectory string + if extraImageContent, extraLocalContent, scansDirectory, err = b.sbomScan(ctx, options); err != nil { + return imgID, nil, "", fmt.Errorf("scanning rootfs to generate SBOM for container %q: %w", b.ContainerID, err) + } + if scansDirectory != "" { + defer func() { + if err := os.RemoveAll(scansDirectory); err != nil { + logrus.Warnf("removing temporary directory %q: %v", scansDirectory, err) + } + }() + } + for k, v := range extraImageContent { + if _, set := options.ExtraImageContent[k]; !set { + options.ExtraImageContent[k] = v + } + } + } + // Build an image reference from which we can copy the finished image. src, err = b.makeContainerImageRef(options) if err != nil { @@ -402,7 +433,31 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options } } } + // If we're supposed to store SBOM or PURL information in local files, write them now. + for filename, content := range extraLocalContent { + err := func() error { + output, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) + if err != nil { + return err + } + defer output.Close() + input, err := os.Open(content) + if err != nil { + return err + } + defer input.Close() + if _, err := io.Copy(output, input); err != nil { + return fmt.Errorf("copying from %q to %q: %w", content, filename, err) + } + return nil + }() + if err != nil { + return imgID, nil, "", err + } + } + // Calculate the as-written digest of the image's manifest and build the digested + // reference for the image. 
manifestDigest, err := manifest.Digest(manifestBytes) if err != nil { return imgID, nil, "", fmt.Errorf("computing digest of manifest of new image %q: %w", transports.ImageName(dest), err) diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go index 3a287c7ea..581e5cb03 100644 --- a/vendor/github.com/containers/buildah/config.go +++ b/vendor/github.com/containers/buildah/config.go @@ -12,7 +12,6 @@ import ( "github.com/containers/buildah/define" "github.com/containers/buildah/docker" internalUtil "github.com/containers/buildah/internal/util" - "github.com/containers/common/pkg/util" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/transports" @@ -20,6 +19,7 @@ import ( "github.com/containers/storage/pkg/stringid" ociv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // unmarshalConvertedConfig obtains the config blob of img valid for the wantedManifestMIMEType format @@ -229,10 +229,10 @@ func (b *Builder) OSFeatures() []string { // SetOSFeature adds a feature of the OS which the container, or a container // built using an image built from this container, depends on the OS supplying. func (b *Builder) SetOSFeature(feature string) { - if !util.StringInSlice(feature, b.OCIv1.OSFeatures) { + if !slices.Contains(b.OCIv1.OSFeatures, feature) { b.OCIv1.OSFeatures = append(b.OCIv1.OSFeatures, feature) } - if !util.StringInSlice(feature, b.Docker.OSFeatures) { + if !slices.Contains(b.Docker.OSFeatures, feature) { b.Docker.OSFeatures = append(b.Docker.OSFeatures, feature) } } @@ -241,7 +241,7 @@ func (b *Builder) SetOSFeature(feature string) { // container built using an image built from this container, depends on the OS // supplying. func (b *Builder) UnsetOSFeature(feature string) { - if util.StringInSlice(feature, b.OCIv1.OSFeatures) { + if slices.Contains(b.OCIv1.OSFeatures, feature) { features := make([]string, 0, len(b.OCIv1.OSFeatures)) for _, f := range b.OCIv1.OSFeatures { if f != feature { @@ -250,7 +250,7 @@ func (b *Builder) UnsetOSFeature(feature string) { } b.OCIv1.OSFeatures = features } - if util.StringInSlice(feature, b.Docker.OSFeatures) { + if slices.Contains(b.Docker.OSFeatures, feature) { features := make([]string, 0, len(b.Docker.OSFeatures)) for _, f := range b.Docker.OSFeatures { if f != feature { diff --git a/vendor/github.com/containers/buildah/convertcw.go b/vendor/github.com/containers/buildah/convertcw.go index 85576f425..be12b7f84 100644 --- a/vendor/github.com/containers/buildah/convertcw.go +++ b/vendor/github.com/containers/buildah/convertcw.go @@ -45,6 +45,7 @@ type CWConvertImageOptions struct { FirmwareLibrary string BaseImage string Logger *logrus.Logger + ExtraImageContent map[string]string // Passed through to BuilderOptions. Most settings won't make // sense to be made available here because we don't launch a process. 
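The new ExtraImageContent plumbing above (on both CommitOptions and CWConvertImageOptions) threads extra files into the committed image; the SBOM scanner output reuses the same mechanism. A hedged sketch of just the option — builder setup is elided and the paths are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/containers/buildah"
)

func main() {
	opts := buildah.CommitOptions{
		// Keys are paths inside the committed image; values are local
		// files whose contents are injected there, owned by 0:0 with
		// mode 0644 (regular files only, per the field's doc comment).
		ExtraImageContent: map[string]string{
			"/usr/share/doc/build-info.txt": "/tmp/build-info.txt",
		},
	}
	fmt.Printf("injecting %d extra file(s) at commit\n", len(opts.ExtraImageContent))
}
```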
@@ -171,6 +172,8 @@ func CWConvertImage(ctx context.Context, systemContext *types.SystemContext, sto Slop: options.Slop, FirmwareLibrary: options.FirmwareLibrary, Logger: logger, + GraphOptions: store.GraphOptions(), + ExtraImageContent: options.ExtraImageContent, } rc, workloadConfig, err := mkcw.Archive(sourceDir, &source.OCIv1, archiveOptions) if err != nil { diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go index 95c9b9108..1dae0edc4 100644 --- a/vendor/github.com/containers/buildah/define/build.go +++ b/vendor/github.com/containers/buildah/define/build.go @@ -333,4 +333,7 @@ type BuildOptions struct { // value set in a base image will be preserved, so this does not // frequently need to be set. OSVersion string + // SBOMScanOptions encapsulates options which control whether or not we + // run scanners on the rootfs that we're about to commit, and how. + SBOMScanOptions []SBOMScanOptions } diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go index 5d0032aae..27d112805 100644 --- a/vendor/github.com/containers/buildah/define/types.go +++ b/vendor/github.com/containers/buildah/define/types.go @@ -29,7 +29,7 @@ const ( // identify working containers. Package = "buildah" // Version for the Package. Also used by .packit.sh for Packit builds. - Version = "1.33.5" + Version = "1.35.1" // DefaultRuntime if containers.conf fails. DefaultRuntime = "runc" @@ -54,6 +54,9 @@ const ( SNP TeeType = "snp" ) +// RLimitDefaultValue is the value set by default for nofile and nproc +const RLimitDefaultValue = uint64(1048576) + // TeeType is a supported trusted execution environment type. type TeeType string @@ -121,7 +124,7 @@ type ConfidentialWorkloadOptions struct { AttestationURL string CPUs int Memory int - TempDir string + TempDir string // used for the temporary plaintext copy of the disk image TeeType TeeType IgnoreAttestationErrors bool WorkloadID string @@ -130,6 +133,42 @@ type ConfidentialWorkloadOptions struct { FirmwareLibrary string } +// SBOMMergeStrategy tells us how to merge multiple SBOM documents into one. +type SBOMMergeStrategy string + +const ( + // SBOMMergeStrategyCat literally concatenates the documents. + SBOMMergeStrategyCat SBOMMergeStrategy = "cat" + // SBOMMergeStrategyCycloneDXByComponentNameAndVersion adds components + // from the second document to the first, so long as they have a + // name+version combination which is not already present in the + // components array. + SBOMMergeStrategyCycloneDXByComponentNameAndVersion SBOMMergeStrategy = "merge-cyclonedx-by-component-name-and-version" + // SBOMMergeStrategySPDXByPackageNameAndVersionInfo adds packages from + // the second document to the first, so long as they have a + // name+versionInfo combination which is not already present in the + // first document's packages array, and adds hasExtractedLicensingInfos + // items from the second document to the first, so long as they include + // a licenseId value which is not already present in the first + // document's hasExtractedLicensingInfos array. + SBOMMergeStrategySPDXByPackageNameAndVersionInfo SBOMMergeStrategy = "merge-spdx-by-package-name-and-versioninfo" +) + +// SBOMScanOptions encapsulates options which control whether or not we run a +// scanner on the rootfs that we're about to commit, and how.
+type SBOMScanOptions struct { + Type []string // a shorthand name for a defined group of these options + Image string // the scanner image to use + PullPolicy PullPolicy // how to get the scanner image + Commands []string // one or more commands to invoke for the image rootfs or ContextDir locations + ContextDir []string // one or more "source" directory locations + SBOMOutput string // where to save SBOM scanner output outside of the image (i.e., the local filesystem) + PURLOutput string // where to save PURL list outside of the image (i.e., the local filesystem) + ImageSBOMOutput string // where to save SBOM scanner output in the image + ImagePURLOutput string // where to save PURL list in the image + MergeStrategy SBOMMergeStrategy // how to merge the outputs of multiple scans +} + // TempDirForURL checks if the passed-in string looks like a URL or -. If it is, // TempDirForURL creates a temporary directory, arranges for its contents to be // the contents of that URL, and returns the temporary directory's path, along diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index 7318e04bd..a26153b9b 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -45,9 +45,9 @@ const ( Dockerv2ImageManifest = define.Dockerv2ImageManifest ) -// ExtractRootfsOptions is consumed by ExtractRootfs() which allows -// users to preserve nature of various modes like setuid, setgid and xattrs -// over the extracted file system objects. +// ExtractRootfsOptions is consumed by ExtractRootfs() which allows users to +// control whether attributes like the setuid and setgid bits and +// xattrs are preserved when extracting file system objects. type ExtractRootfsOptions struct { StripSetuidBit bool // strip the setuid bit off of items being extracted. StripSetgidBit bool // strip the setgid bit off of items being extracted.
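For reviewers trying the new options out: a sketch of one possible configuration, assuming the {ROOTFS} and {OUTPUT} placeholders that buildah's documentation describes for scanner command templates. The scanner image, command, and output paths below are illustrative assumptions, not defaults introduced by this change:

package example

import "github.com/containers/buildah/define"

// exampleSBOMScan configures a single hypothetical scanner run whose CycloneDX
// output is kept both next to the build and inside the image itself.
func exampleSBOMScan() define.BuildOptions {
	scan := define.SBOMScanOptions{
		Image:           "ghcr.io/anchore/syft:latest", // assumed scanner image
		PullPolicy:      define.PullIfMissing,
		Commands:        []string{"/syft scan -q dir:{ROOTFS} --output cyclonedx-json={OUTPUT}"},
		SBOMOutput:      "sbom.cdx.json",                 // copy saved on the local filesystem
		ImageSBOMOutput: "/root/buildinfo/sbom.cdx.json", // copy saved in the image
		MergeStrategy:   define.SBOMMergeStrategyCycloneDXByComponentNameAndVersion,
	}
	return define.BuildOptions{SBOMScanOptions: []define.SBOMScanOptions{scan}}
}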
@@ -82,6 +82,7 @@ type containerImageRef struct { postEmptyLayers []v1.History overrideChanges []string overrideConfig *manifest.Schema2Config + extraImageContent map[string]string } type blobLayerInfo struct { @@ -171,6 +172,22 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo if err := json.Unmarshal(i.oconfig, &image); err != nil { return nil, fmt.Errorf("recreating OCI configuration for %q: %w", i.containerID, err) } + if options.TempDir == "" { + cdir, err := i.store.ContainerDirectory(i.containerID) + if err != nil { + return nil, fmt.Errorf("getting the per-container data directory for %q: %w", i.containerID, err) + } + tempdir, err := os.MkdirTemp(cdir, "buildah-rootfs") + if err != nil { + return nil, fmt.Errorf("creating a temporary data directory to hold a rootfs image for %q: %w", i.containerID, err) + } + defer func() { + if err := os.RemoveAll(tempdir); err != nil { + logrus.Warnf("removing temporary directory %q: %v", tempdir, err) + } + }() + options.TempDir = tempdir + } mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) if err != nil { return nil, fmt.Errorf("mounting container %q: %w", i.containerID, err) @@ -186,6 +203,8 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo DiskEncryptionPassphrase: options.DiskEncryptionPassphrase, Slop: options.Slop, FirmwareLibrary: options.FirmwareLibrary, + GraphOptions: i.store.GraphOptions(), + ExtraImageContent: i.extraImageContent, } rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions) if err != nil { @@ -211,9 +230,8 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo } // Extract the container's whole filesystem as if it were a single layer. -// Takes ExtractRootfsOptions as argument which allows caller to configure -// preserve nature of setuid,setgid,sticky and extended attributes -// on extracted rootfs. +// The ExtractRootfsOptions control whether or not to preserve setuid and +// setgid bits and extended attributes on contents. func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) { var uidMap, gidMap []idtools.IDMap mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) @@ -224,6 +242,27 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo errChan := make(chan error, 1) go func() { defer close(errChan) + if len(i.extraImageContent) > 0 { + // Abuse the tar format and _prepend_ the synthesized + // data items to the archive we'll get from + // copier.Get(), in a way that looks right to a reader + // as long as we DON'T Close() the tar Writer. 
+ filename, _, _, err := i.makeExtraImageContentDiff(false) + if err != nil { + errChan <- err + return + } + file, err := os.Open(filename) + if err != nil { + errChan <- err + return + } + defer file.Close() + if _, err = io.Copy(pipeWriter, file); err != nil { + errChan <- err + return + } + } if i.idMappingOptions != nil { uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap) } @@ -234,7 +273,7 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo StripSetgidBit: opts.StripSetgidBit, StripXattrs: opts.StripXattrs, } - err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter) + err := copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter) errChan <- err pipeWriter.Close() @@ -294,8 +333,8 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, dimage.RootFS.Type = docker.TypeLayers dimage.RootFS.DiffIDs = []digest.Digest{} // Only clear the history if we're squashing, otherwise leave it be so - // that we can append entries to it. Clear the parent, too, we no - // longer include its layers and history. + // that we can append entries to it. Clear the parent, too, to reflect + // that we no longer include its layers and history. if i.confidentialWorkload.Convert || i.squash || i.omitHistory { dimage.Parent = "" dimage.History = []docker.V2S2History{} @@ -368,8 +407,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System if err != nil { return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err) } - // Walk the list of parent layers, prepending each as we go. If we're squashing, - // stop at the layer ID of the top layer, which we won't really be using anyway. + // Walk the list of parent layers, prepending each as we go. If we're squashing + // or making a confidential workload, we're only producing one layer, so stop at + // the layer ID of the top layer, which we won't really be using anyway. for layer != nil { layers = append(append([]string{}, layerID), layers...) layerID = layer.Parent @@ -382,6 +422,14 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err) } } + layer = nil + + // If we're slipping in a synthesized layer, we need to add a placeholder for it + // to the list. + const synthesizedLayerID = "(synthesized layer)" + if len(i.extraImageContent) > 0 && !i.confidentialWorkload.Convert && !i.squash { + layers = append(layers, synthesizedLayerID) + } logrus.Debugf("layer list: %q", layers) // Make a temporary directory to hold blobs. @@ -407,6 +455,8 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } // Extract each layer and compute its digests, both compressed (if requested) and uncompressed. + var extraImageContentDiff string + var extraImageContentDiffDigest digest.Digest blobLayers := make(map[digest.Digest]blobLayerInfo) for _, layerID := range layers { what := fmt.Sprintf("layer %q", layerID) @@ -417,16 +467,32 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System omediaType := v1.MediaTypeImageLayer dmediaType := docker.V2S2MediaTypeUncompressedLayer // Look up this layer. 
- layer, err := i.store.Layer(layerID) - if err != nil { - return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err) + var layerUncompressedDigest digest.Digest + var layerUncompressedSize int64 + if layerID != synthesizedLayerID { + layer, err := i.store.Layer(layerID) + if err != nil { + return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err) + } + layerID = layer.ID + layerUncompressedDigest = layer.UncompressedDigest + layerUncompressedSize = layer.UncompressedSize + } else { + diffFilename, digest, size, err := i.makeExtraImageContentDiff(true) + if err != nil { + return nil, fmt.Errorf("unable to generate layer for additional content: %w", err) + } + extraImageContentDiff = diffFilename + extraImageContentDiffDigest = digest + layerUncompressedDigest = digest + layerUncompressedSize = size } // If we already know the digest of the contents of parent // layers, reuse their blobsums, diff IDs, and sizes. - if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" { - layerBlobSum := layer.UncompressedDigest - layerBlobSize := layer.UncompressedSize - diffID := layer.UncompressedDigest + if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layerID != synthesizedLayerID && layerUncompressedDigest != "" { + layerBlobSum := layerUncompressedDigest + layerBlobSize := layerUncompressedSize + diffID := layerUncompressedDigest // Note this layer in the manifest, using the appropriate blobsum. olayerDescriptor := v1.Descriptor{ MediaType: omediaType, @@ -444,7 +510,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID) dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID) blobLayers[diffID] = blobLayerInfo{ - ID: layer.ID, + ID: layerID, Size: layerBlobSize, } continue @@ -474,15 +540,22 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System return nil, err } } else { - // If we're up to the final layer, but we don't want to - // include a diff for it, we're done. - if i.emptyLayer && layerID == i.layerID { - continue - } - // Extract this layer, one of possibly many. - rc, err = i.store.Diff("", layerID, diffOptions) - if err != nil { - return nil, fmt.Errorf("extracting %s: %w", what, err) + if layerID != synthesizedLayerID { + // If we're up to the final layer, but we don't want to + // include a diff for it, we're done. + if i.emptyLayer && layerID == i.layerID { + continue + } + // Extract this layer, one of possibly many. + rc, err = i.store.Diff("", layerID, diffOptions) + if err != nil { + return nil, fmt.Errorf("extracting %s: %w", what, err) + } + } else { + // Slip in additional content as an additional layer. + if rc, err = os.Open(extraImageContentDiff); err != nil { + return nil, err + } } } srcHasher := digest.Canonical.Digester() @@ -624,20 +697,19 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } } - // Calculate base image history for special scenarios - // when base layers does not contains any history. - // We will ignore sanity checks if baseImage history is null - // but still add new history for docker parity. - baseImageHistoryLen := len(oimage.History) // Only attempt to append history if history was not disabled explicitly. if !i.omitHistory { + // Keep track of how many entries the base image's history had + // before we started adding to it. 
+ baseImageHistoryLen := len(oimage.History) appendHistory(i.preEmptyLayers) created := time.Now().UTC() if i.created != nil { created = (*i.created).UTC() } comment := i.historyComment - // Add a comment for which base image is being used + // Add a comment indicating which base image was used, if it wasn't + // just an image ID. if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID { comment += "FROM " + i.fromImageName } @@ -659,10 +731,24 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System dimage.History = append(dimage.History, dnews) appendHistory(i.postEmptyLayers) - // Sanity check that we didn't just create a mismatch between non-empty layers in the - // history and the number of diffIDs. Following sanity check is ignored if build history - // is disabled explicitly by the user. - // Disable sanity check when baseImageHistory is null for docker parity + // Add a history entry for the extra image content if we added a layer for it. + if extraImageContentDiff != "" { + createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded()) + onews := v1.History{ + Created: &created, + CreatedBy: createdBy, + } + oimage.History = append(oimage.History, onews) + dnews := docker.V2S2History{ + Created: created, + CreatedBy: createdBy, + } + dimage.History = append(dimage.History, dnews) + } + + // Confidence check that we didn't just create a mismatch between non-empty layers in the + // history and the number of diffIDs. Only applicable if the base image (if there was + // one) provided us at least one entry to use as a starting point. if baseImageHistoryLen != 0 { expectedDiffIDs := expectedOCIDiffIDs(oimage) if len(oimage.RootFS.DiffIDs) != expectedDiffIDs { @@ -859,6 +945,68 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil } +// makeExtraImageContentDiff creates an archive file containing the contents of +// files named in i.extraImageContent. The footer that marks the end of the +// archive may be omitted. 
+func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (string, digest.Digest, int64, error) { + cdir, err := i.store.ContainerDirectory(i.containerID) + if err != nil { + return "", "", -1, err + } + diff, err := os.CreateTemp(cdir, "extradiff") + if err != nil { + return "", "", -1, err + } + defer diff.Close() + digester := digest.Canonical.Digester() + counter := ioutils.NewWriteCounter(digester.Hash()) + tw := tar.NewWriter(io.MultiWriter(diff, counter)) + created := time.Now() + if i.created != nil { + created = *i.created + } + for path, contents := range i.extraImageContent { + if err := func() error { + content, err := os.Open(contents) + if err != nil { + return err + } + defer content.Close() + st, err := content.Stat() + if err != nil { + return err + } + if err := tw.WriteHeader(&tar.Header{ + Name: path, + Typeflag: tar.TypeReg, + Mode: 0o644, + ModTime: created, + Size: st.Size(), + }); err != nil { + return err + } + if _, err := io.Copy(tw, content); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil + }(); err != nil { + return "", "", -1, err + } + } + if !includeFooter { + return diff.Name(), "", -1, err + } + tw.Close() + return diff.Name(), digester.Digest(), counter.Count, err +} + +// makeContainerImageRef creates a containers/image/v5/types.ImageReference +// which is mainly used for representing the working container as a source +// image that can be copied, which is how we commit container to create the +// image. func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) { var name reference.Named container, err := b.store.Container(b.ContainerID) @@ -900,11 +1048,21 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR } parent := "" + forceOmitHistory := false if b.FromImageID != "" { parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID) if parentDigest.Validate() == nil { parent = parentDigest.String() } + if !options.OmitHistory && len(b.OCIv1.History) == 0 && len(b.OCIv1.RootFS.DiffIDs) != 0 { + // Parent had layers, but no history. We shouldn't confuse + // our own confidence checks by adding history for layers + // that we're adding, creating an image with multiple layers, + // only some of which have history entries, which would be + // broken in confusing ways. 
+ b.Logger.Debugf("parent image %q had no history but had %d layers, assuming OmitHistory", b.FromImageID, len(b.OCIv1.RootFS.DiffIDs)) + forceOmitHistory = true + } } ref := &containerImageRef{ @@ -926,7 +1084,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR preferredManifestType: manifestType, squash: options.Squash, confidentialWorkload: options.ConfidentialWorkloadOptions, - omitHistory: options.OmitHistory, + omitHistory: options.OmitHistory || forceOmitHistory, emptyLayer: options.EmptyLayer && !options.Squash && !options.ConfidentialWorkloadOptions.Convert, idMappingOptions: &b.IDMappingOptions, parent: parent, @@ -935,6 +1093,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR postEmptyLayers: b.AppendedEmptyLayers, overrideChanges: options.OverrideChanges, overrideConfig: options.OverrideConfig, + extraImageContent: copyStringStringMap(options.ExtraImageContent), } return ref, nil } diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go index 03081fde9..39e983706 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/build.go +++ b/vendor/github.com/containers/buildah/imagebuildah/build.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + gotypes "go/types" "io" "net/http" "os" @@ -36,6 +37,7 @@ import ( specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/openshift/imagebuilder" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "golang.org/x/sync/semaphore" ) @@ -493,6 +495,26 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string return &stdoutBuffer, nil } +// platformIsAcceptable checks whether the platform value indicates that the +// corresponding index entry is suitable for use as a base image +func platformIsAcceptable(platform *v1.Platform, logger *logrus.Logger) bool { + if platform == nil { + logger.Trace("rejecting potential base image with no platform information") + return false + } + if gotypes.SizesFor("gc", platform.Architecture) == nil { + // the compiler's never heard of this + logger.Tracef("rejecting potential base image architecture %q for which Go has no knowledge of how to do unsafe code", platform.Architecture) + return false + } + if slices.Contains([]string{"", "unknown"}, platform.OS) { + // we're hard-wired to reject images with these values + logger.Tracef("rejecting potential base image for which the OS value is always-rejected value %q", platform.OS) + return false + } + return true +} + // platformsForBaseImages resolves the names of base images from the // dockerfiles, and if they are all valid references to manifest lists, returns // the list of platforms that are supported by all of the base images.
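To make the new filter's behavior concrete, here is a hypothetical snippet (it would have to live in this same package, since the helper is unexported) showing which manifest-list entries survive; the rejected cases correspond to the platform-less and "unknown/unknown" entries that attestation artifacts add to image indexes:

package imagebuildah

import (
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
)

// demoPlatformFiltering exercises platformIsAcceptable with illustrative inputs.
func demoPlatformFiltering() []bool {
	logger := logrus.New()
	return []bool{
		// true: Go knows amd64, and "linux" is not a rejected OS value
		platformIsAcceptable(&v1.Platform{OS: "linux", Architecture: "amd64"}, logger),
		// false: no platform information at all
		platformIsAcceptable(nil, logger),
		// false: "unknown" is not an architecture the Go toolchain recognizes,
		// and the OS value is on the always-rejected list anyway
		platformIsAcceptable(&v1.Platform{OS: "unknown", Architecture: "unknown"}, logger),
	}
}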
@@ -570,7 +592,10 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi if baseImageIndex == 0 { // populate the list with the first image's normalized platforms for _, instance := range index.Manifests { - if instance.Platform == nil { + if !platformIsAcceptable(instance.Platform, logger) { + continue + } + if instance.ArtifactType != "" { continue } platform := internalUtil.NormalizePlatform(*instance.Platform) @@ -581,7 +606,10 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi // prune the list of any normalized platforms this base image doesn't support imagePlatforms := make(map[string]struct{}) for _, instance := range index.Manifests { - if instance.Platform == nil { + if !platformIsAcceptable(instance.Platform, logger) { + continue + } + if instance.ArtifactType != "" { continue } platform := internalUtil.NormalizePlatform(*instance.Platform) @@ -651,7 +679,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri return nil, fmt.Errorf("reading multiple stages: %w", err) } var baseImages []string - nicknames := make(map[string]bool) + nicknames := make(map[string]struct{}) for stageIndex, stage := range stages { node := stage.Node // first line for node != nil { // each line @@ -673,7 +701,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri } } base := child.Next.Value - if base != "" && base != buildah.BaseImageFakeName && !nicknames[base] { + if base != "" && base != buildah.BaseImageFakeName && !internalUtil.SetHas(nicknames, base) { headingArgs := argsMapToSlice(stage.Builder.HeadingArgs) userArgs := argsMapToSlice(stage.Builder.Args) // append heading args so if --build-arg key=value is not @@ -692,7 +720,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri node = node.Next // next line } if stage.Name != strconv.Itoa(stageIndex) { - nicknames[stage.Name] = true + nicknames[stage.Name] = struct{}{} } } return baseImages, nil diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 917c84f6c..4c797decd 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -14,6 +14,7 @@ import ( "github.com/containers/buildah" "github.com/containers/buildah/define" + internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/buildah/util" @@ -41,19 +42,19 @@ import ( // complain if we're given values for arguments which have no corresponding ARG // instruction in the Dockerfile, since that's usually an indication of a user // error, but for these values we make exceptions and ignore them. 
-var builtinAllowedBuildArgs = map[string]bool{ - "HTTP_PROXY": true, - "http_proxy": true, - "HTTPS_PROXY": true, - "https_proxy": true, - "FTP_PROXY": true, - "ftp_proxy": true, - "NO_PROXY": true, - "no_proxy": true, - "TARGETARCH": true, - "TARGETOS": true, - "TARGETPLATFORM": true, - "TARGETVARIANT": true, +var builtinAllowedBuildArgs = map[string]struct{}{ + "HTTP_PROXY": {}, + "http_proxy": {}, + "HTTPS_PROXY": {}, + "https_proxy": {}, + "FTP_PROXY": {}, + "ftp_proxy": {}, + "NO_PROXY": {}, + "no_proxy": {}, + "TARGETARCH": {}, + "TARGETOS": {}, + "TARGETPLATFORM": {}, + "TARGETVARIANT": {}, } // Executor is a buildah-based implementation of the imagebuilder.Executor @@ -110,8 +111,8 @@ type Executor struct { forceRmIntermediateCtrs bool imageMap map[string]string // Used to map images that we create to handle the AS construct. containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. - baseMap map[string]bool // Holds the names of every base image, as given. - rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. + baseMap map[string]struct{} // Holds the names of every base image, as given. + rootfsMap map[string]struct{} // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. blobDirectory string excludes []string groupAdd []string @@ -151,6 +152,7 @@ type Executor struct { osFeatures []string envs []string confidentialWorkload define.ConfidentialWorkloadOptions + sbomScanOptions []define.SBOMScanOptions } type imageTypeAndHistoryAndDiffIDs struct { @@ -278,8 +280,8 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, imageMap: make(map[string]string), containerMap: make(map[string]*buildah.Builder), - baseMap: make(map[string]bool), - rootfsMap: make(map[string]bool), + baseMap: make(map[string]struct{}), + rootfsMap: make(map[string]struct{}), blobDirectory: options.BlobDirectory, unusedArgs: make(map[string]struct{}), capabilities: capabilities, @@ -309,6 +311,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o osFeatures: append([]string{}, options.OSFeatures...), envs: append([]string{}, options.Envs...), confidentialWorkload: options.ConfidentialWorkload, + sbomScanOptions: options.SBOMScanOptions, } if exec.err == nil { exec.err = os.Stderr @@ -337,13 +340,13 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o // We have to be careful here - it's either an argument // and value, or just an argument, since they can be // separated by either "=" or whitespace. - list := strings.SplitN(arg.Value, "=", 2) + argName, argValue, hasValue := strings.Cut(arg.Value, "=") if !foundFirstStage { - if len(list) > 1 { - globalArgs[list[0]] = list[1] + if hasValue { + globalArgs[argName] = argValue } } - delete(exec.unusedArgs, list[0]) + delete(exec.unusedArgs, argName) } case "FROM": foundFirstStage = true @@ -491,17 +494,12 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE // to the Dockerfile that would provide the same result. // Reason: Docker adds label modification as a last step which can be // processed like regular steps, and if no modification is done to - // layers, its easier to re-use cached layers. + // layers, its easier to reuse cached layers. if len(b.labels) > 0 { var labelLine string labels := append([]string{}, b.labels...) 
for _, labelSpec := range labels { - label := strings.SplitN(labelSpec, "=", 2) - key := label[0] - value := "" - if len(label) > 1 { - value = label[1] - } + key, value, _ := strings.Cut(labelSpec, "=") // check only for an empty key since docker allows empty values if key != "" { labelLine += fmt.Sprintf(" %q=%q", key, value) @@ -523,10 +521,8 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE if len(b.envs) > 0 { var envLine string for _, envSpec := range b.envs { - env := strings.SplitN(envSpec, "=", 2) - key := env[0] - if len(env) > 1 { - value := env[1] + key, value, hasValue := strings.Cut(envSpec, "=") + if hasValue { envLine += fmt.Sprintf(" %q=%q", key, value) } else { return "", nil, false, fmt.Errorf("BUG: unresolved environment variable: %q", key) @@ -613,7 +609,7 @@ func markDependencyStagesForTarget(dependencyMap map[string]*stageDependencyInfo } func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMap map[string]*stageDependencyInfo, args map[string]string) { - argFound := make(map[string]bool) + argFound := make(map[string]struct{}) for _, stage := range stages { node := stage.Node // first line for node != nil { // each line @@ -624,12 +620,12 @@ func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMa if strings.Contains(argName, "=") { res := strings.Split(argName, "=") if res[1] != "" { - argFound[res[0]] = true + argFound[res[0]] = struct{}{} } } argHasValue := true if !strings.Contains(argName, "=") { - argHasValue = argFound[argName] + argHasValue = internalUtil.SetHas(argFound, argName) } if _, ok := args[argName]; !argHasValue && !ok { shouldWarn := true @@ -779,7 +775,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image if err != nil { return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err) } - b.baseMap[baseWithArg] = true + b.baseMap[baseWithArg] = struct{}{} logrus.Debugf("base for stage %d: %q", stageIndex, base) // Check if selected base is not an additional // build context and if base is a valid stage @@ -801,7 +797,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image // was named using argument values, we might // not record the right value here. rootfs := strings.TrimPrefix(flag, "--from=") - b.rootfsMap[rootfs] = true + b.rootfsMap[rootfs] = struct{}{} logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs) // Populate dependency tree and check // if following ADD or COPY needs any other @@ -844,24 +840,18 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image mountFlags := strings.TrimPrefix(flag, "--mount=") fields := strings.Split(mountFlags, ",") for _, field := range fields { - if strings.HasPrefix(field, "from=") { - fromField := strings.SplitN(field, "=", 2) - if len(fromField) > 1 { - mountFrom := fromField[1] - // Check if this base is a stage if yes - // add base to current stage's dependency tree - // but also confirm if this is not in additional context. 
- if _, ok := b.additionalBuildContexts[mountFrom]; !ok { - // Treat from as a rootfs we need to preserve - b.rootfsMap[mountFrom] = true - if _, ok := dependencyMap[mountFrom]; ok { - // update current stage's dependency info - currentStageInfo := dependencyMap[stage.Name] - currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom) - } + if mountFrom, hasFrom := strings.CutPrefix(field, "from="); hasFrom { + // Check if this base is a stage if yes + // add base to current stage's dependency tree + // but also confirm if this is not in additional context. + if _, ok := b.additionalBuildContexts[mountFrom]; !ok { + // Treat from as a rootfs we need to preserve + b.rootfsMap[mountFrom] = struct{}{} + if _, ok := dependencyMap[mountFrom]; ok { + // update current stage's dependency info + currentStageInfo := dependencyMap[stage.Name] + currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom) } - } else { - return "", nil, fmt.Errorf("invalid value for field `from=`: %q", fromField[1]) } } } diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 9398dcef8..0b83800ff 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -565,24 +565,23 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte stageMountPoints := make(map[string]internal.StageMountDetails) for _, flag := range mountList { if strings.Contains(flag, "from") { - arr := strings.SplitN(flag, ",", 2) - if len(arr) < 2 { + tokens := strings.Split(flag, ",") + if len(tokens) < 2 { return nil, fmt.Errorf("Invalid --mount command: %s", flag) } - tokens := strings.Split(flag, ",") - for _, val := range tokens { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + for _, token := range tokens { + key, val, hasVal := strings.Cut(token, "=") + switch key { case "from": - if len(kv) == 1 { + if !hasVal { return nil, fmt.Errorf("unable to resolve argument for `from=`: bad argument") } - if kv[1] == "" { + if val == "" { return nil, fmt.Errorf("unable to resolve argument for `from=`: from points to an empty value") } - from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments()) + from, fromErr := imagebuilder.ProcessWord(val, s.stage.Builder.Arguments()) if fromErr != nil { - return nil, fmt.Errorf("unable to resolve argument %q: %w", kv[1], fromErr) + return nil, fmt.Errorf("unable to resolve argument %q: %w", val, fromErr) } // If additional buildContext contains this // give priority to that and break if additional @@ -684,6 +683,15 @@ func (s *StageExecutor) createNeededHeredocMountsForRun(files []imagebuilder.Fil return mountResult, nil } +func parseSheBang(data string) string { + lines := strings.Split(data, "\n") + if len(lines) > 2 && strings.HasPrefix(lines[1], "#!") { + shebang := strings.TrimLeft(lines[1], "#!") + return shebang + } + return "" +} + // Run executes a RUN instruction using the stage's current working container // as a root directory. 
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { @@ -694,12 +702,17 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { if heredoc := buildkitparser.MustParseHeredoc(args[0]); heredoc != nil { if strings.HasPrefix(run.Files[0].Data, "#!") || strings.HasPrefix(run.Files[0].Data, "\n#!") { // This is a single heredoc with a shebang, so create a file - // and run it. + // and run it with program specified in shebang. heredocMount, err := s.createNeededHeredocMountsForRun(run.Files) if err != nil { return err } - args = []string{heredocMount[0].Destination} + shebangArgs := parseSheBang(run.Files[0].Data) + if shebangArgs != "" { + args = []string{shebangArgs + " " + heredocMount[0].Destination} + } else { + args = []string{heredocMount[0].Destination} + } heredocMounts = append(heredocMounts, heredocMount...) } else { args = []string{run.Files[0].Data} @@ -1044,8 +1057,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, moreStages := s.index < len(s.stages)-1 lastStage := !moreStages onlyBaseImage := false - imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)]) - rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)]) + imageIsUsedLater := moreStages && (internalUtil.SetHas(s.executor.baseMap, stage.Name) || internalUtil.SetHas(s.executor.baseMap, strconv.Itoa(stage.Position))) + rootfsIsUsedLater := moreStages && (internalUtil.SetHas(s.executor.rootfsMap, stage.Name) || internalUtil.SetHas(s.executor.rootfsMap, strconv.Itoa(stage.Position))) // If the base image's name corresponds to the result of an earlier // stage, make sure that stage has finished building an image, and @@ -1160,7 +1173,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, if len(children) == 0 { // There are no steps. - if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 { + if s.builder.FromImageID == "" || s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.labels) > 0 || len(s.executor.annotations) > 0 || len(s.executor.unsetEnvs) > 0 || len(s.executor.unsetLabels) > 0 || len(s.executor.sbomScanOptions) > 0 { // We either don't have a base image, or we need to // transform the contents of the base image, or we need // to make some changes to just the config blob. Whichever @@ -1169,7 +1182,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // No base image means there's nothing to put in a // layer, so don't create one. 
emptyLayer := (s.builder.FromImageID == "") - if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), emptyLayer, s.output, s.executor.squash, lastStage); err != nil { + if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), emptyLayer, s.output, s.executor.squash || s.executor.confidentialWorkload.Convert, lastStage); err != nil { return "", nil, false, fmt.Errorf("committing base container: %w", err) } } else { @@ -1204,7 +1217,24 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } logrus.Debugf("Parsed Step: %+v", *step) if !s.executor.quiet { - s.log("%s", step.Original) + logMsg := step.Original + if len(step.Heredocs) > 0 { + summarizeHeredoc := func(doc string) string { + doc = strings.TrimSpace(doc) + lines := strings.Split(strings.ReplaceAll(doc, "\r\n", "\n"), "\n") + summary := lines[0] + if len(lines) > 1 { + summary += "..." + } + return summary + } + + for _, doc := range node.Heredocs { + heredocContent := summarizeHeredoc(doc.Content) + logMsg = logMsg + " (" + heredocContent + ")" + } + } + s.log("%s", logMsg) } // Check if there's a --from if the step command is COPY. @@ -1511,7 +1541,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } } - // Note: If the build has squash, we must try to re-use as many layers as possible if cache is found. + // Note: If the build has squash, we must try to reuse as many layers as possible if cache is found. // So only perform commit if it's the lastInstruction of lastStage. if cacheID != "" { logCacheHit(cacheID) @@ -1567,11 +1597,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } if lastInstruction && lastStage { - if s.executor.squash || s.executor.confidentialWorkload.Convert { - // Create a squashed version of this image - // if we're supposed to create one and this - // is the last instruction of the last stage. - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true, lastStage && lastInstruction) + if s.executor.squash || s.executor.confidentialWorkload.Convert || len(s.executor.sbomScanOptions) != 0 { + // If this is the last instruction of the last stage, + // create a squashed or confidential workload + // version of the image if that's what we're after, + // or a normal one if we need to scan the image while + // committing it. 
+ imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, s.executor.squash || s.executor.confidentialWorkload.Convert, lastStage && lastInstruction) if err != nil { return "", nil, false, fmt.Errorf("committing final squash step %+v: %w", *step, err) } @@ -1722,11 +1754,22 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri buildArgs := s.getBuildArgsKey() return "/bin/sh -c #(nop) ARG " + buildArgs case "RUN": + shArg := "" buildArgs := s.getBuildArgsResolvedForRun() + if len(node.Original) > 4 { + shArg = node.Original[4:] + } if buildArgs != "" { - return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:] + return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + shArg + } + result := "/bin/sh -c " + shArg + if len(node.Heredocs) > 0 { + for _, doc := range node.Heredocs { + heredocContent := strings.TrimSpace(doc.Content) + result = result + "\n" + heredocContent + } } - return "/bin/sh -c " + node.Original[4:] + return result case "ADD", "COPY": destination := node for destination.Next != nil { @@ -1748,9 +1791,9 @@ func (s *StageExecutor) getBuildArgsResolvedForRun() string { dockerConfig := s.stage.Builder.Config() for _, env := range dockerConfig.Env { - splitv := strings.SplitN(env, "=", 2) - if len(splitv) == 2 { - configuredEnvs[splitv[0]] = splitv[1] + key, val, hasVal := strings.Cut(env, "=") + if hasVal { + configuredEnvs[key] = val } } @@ -2102,8 +2145,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer s.builder.SetPort(string(p)) } for _, envSpec := range config.Env { - spec := strings.SplitN(envSpec, "=", 2) - s.builder.SetEnv(spec[0], spec[1]) + key, val, _ := strings.Cut(envSpec, "=") + s.builder.SetEnv(key, val) } for _, envSpec := range s.executor.unsetEnvs { s.builder.UnsetEnv(envSpec) @@ -2139,12 +2182,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer // an intermediate image, in such case we must // honor layer labels if they are configured. 
for _, labelString := range s.executor.layerLabels { - label := strings.SplitN(labelString, "=", 2) - if len(label) > 1 { - s.builder.SetLabel(label[0], label[1]) - } else { - s.builder.SetLabel(label[0], "") - } + labelk, labelv, _ := strings.Cut(labelString, "=") + s.builder.SetLabel(labelk, labelv) } } for k, v := range config.Labels { @@ -2157,12 +2196,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer s.builder.UnsetLabel(key) } for _, annotationSpec := range s.executor.annotations { - annotation := strings.SplitN(annotationSpec, "=", 2) - if len(annotation) > 1 { - s.builder.SetAnnotation(annotation[0], annotation[1]) - } else { - s.builder.SetAnnotation(annotation[0], "") - } + annotationk, annotationv, _ := strings.Cut(annotationSpec, "=") + s.builder.SetAnnotation(annotationk, annotationv) } if imageRef != nil { logName := transports.ImageName(imageRef) @@ -2192,6 +2227,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer } if finalInstruction { options.ConfidentialWorkloadOptions = s.executor.confidentialWorkload + options.SBOMScanOptions = s.executor.sbomScanOptions } imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options) if err != nil { diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index 88f732abb..1829a2e50 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -22,7 +22,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system return nil, errors.New("Internal error: imageID is empty in importBuilderDataFromImage") } - storeopts, err := storage.DefaultStoreOptions(false, 0) + storeopts, err := storage.DefaultStoreOptions() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go index 85e570ce7..72e1050ab 100644 --- a/vendor/github.com/containers/buildah/info.go +++ b/vendor/github.com/containers/buildah/info.go @@ -12,6 +12,7 @@ import ( internalUtil "github.com/containers/buildah/internal/util" putil "github.com/containers/buildah/pkg/util" "github.com/containers/buildah/util" + "github.com/containers/common/pkg/cgroups" "github.com/containers/storage" "github.com/containers/storage/pkg/system" "github.com/containers/storage/pkg/unshare" @@ -50,7 +51,7 @@ func hostInfo() map[string]interface{} { info["cpus"] = runtime.NumCPU() info["rootless"] = unshare.IsRootless() - unified, err := util.IsCgroup2UnifiedMode() + unified, err := cgroups.IsCgroup2UnifiedMode() if err != nil { logrus.Error(err, "err reading cgroups mode") } diff --git a/vendor/github.com/containers/buildah/internal/mkcw/archive.go b/vendor/github.com/containers/buildah/internal/mkcw/archive.go index a0677e426..7517205da 100644 --- a/vendor/github.com/containers/buildah/internal/mkcw/archive.go +++ b/vendor/github.com/containers/buildah/internal/mkcw/archive.go @@ -17,7 +17,12 @@ import ( "strings" "time" + "github.com/containers/buildah/internal/tmpdir" + "github.com/containers/buildah/pkg/overlay" "github.com/containers/luksy" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/system" "github.com/docker/docker/pkg/ioutils" "github.com/docker/go-units" digest "github.com/opencontainers/go-digest" @@ -48,6 +53,8 @@ type ArchiveOptions struct { DiskEncryptionPassphrase string FirmwareLibrary string Logger *logrus.Logger + 
GraphOptions []string // passed in from a storage Store, probably + ExtraImageContent map[string]string } type chainRetrievalError struct { @@ -64,9 +71,7 @@ func (c chainRetrievalError) Error() string { // Archive generates a WorkloadConfig for a specified directory and produces a // tar archive of a container image's rootfs with the expected contents. -// The input directory will have a ".krun_config.json" file added to it while -// this function is running, but it will be removed on completion. -func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadCloser, WorkloadConfig, error) { +func Archive(rootfsPath string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadCloser, WorkloadConfig, error) { const ( teeDefaultCPUs = 2 teeDefaultMemory = 512 @@ -74,7 +79,7 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC teeDefaultTeeType = SNP ) - if path == "" { + if rootfsPath == "" { return nil, WorkloadConfig{}, fmt.Errorf("required path not specified") } logger := options.Logger @@ -97,7 +102,7 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC filesystem := teeDefaultFilesystem workloadID := options.WorkloadID if workloadID == "" { - digestInput := path + filesystem + time.Now().String() + digestInput := rootfsPath + filesystem + time.Now().String() workloadID = digest.Canonical.FromString(digestInput).Encoded() } workloadConfig := WorkloadConfig{ @@ -107,6 +112,9 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC Memory: memory, AttestationURL: options.AttestationURL, } + if options.TempDir == "" { + options.TempDir = tmpdir.GetTempDir() + } // Do things which are specific to the type of TEE we're building for. var chainBytes []byte @@ -165,12 +173,115 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC workloadConfig.TeeData = string(encodedTeeData) } + // We're going to want to add some content to the rootfs, so set up an + // overlay that uses it as a lower layer so that we can write to it. + st, err := system.Stat(rootfsPath) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("reading information about the container root filesystem: %w", err) + } + // Create a temporary directory to hold all of this. Use tmpdir.GetTempDir() + // instead of the passed-in location, which a crafty caller might have put in an + // overlay filesystem in storage because there tends to be more room there than + // in, say, /var/tmp, and the plaintext disk image, which we put in the passed-in + // location, can get quite large. + rootfsParentDir, err := os.MkdirTemp(tmpdir.GetTempDir(), "buildah-rootfs") + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("setting up parent for container root filesystem: %w", err) + } + defer func() { + if err := os.RemoveAll(rootfsParentDir); err != nil { + logger.Warnf("cleaning up parent for container root filesystem: %v", err) + } + }() + // Create a mountpoint for the new overlay, which we'll use as the rootfs. 
+ rootfsDir := filepath.Join(rootfsParentDir, "rootfs") + if err := idtools.MkdirAndChown(rootfsDir, fs.FileMode(st.Mode()), idtools.IDPair{UID: int(st.UID()), GID: int(st.GID())}); err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("creating mount target for container root filesystem: %w", err) + } + defer func() { + if err := os.Remove(rootfsDir); err != nil { + logger.Warnf("removing mount target for container root filesystem: %v", err) + } + }() + // Create a directory to hold all of the overlay package's working state. + tempDir := filepath.Join(rootfsParentDir, "tmp") + if err = os.Mkdir(tempDir, 0o700); err != nil { + return nil, WorkloadConfig{}, err + } + // Create some working state in there. + overlayTempDir, err := overlay.TempDir(tempDir, int(st.UID()), int(st.GID())) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("setting up mount of container root filesystem: %w", err) + } + defer func() { + if err := overlay.RemoveTemp(overlayTempDir); err != nil { + logger.Warnf("cleaning up mount of container root filesystem: %v", err) + } + }() + // Create a mount point using that working state. + rootfsMount, err := overlay.Mount(overlayTempDir, rootfsPath, rootfsDir, 0, 0, options.GraphOptions) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("setting up support for overlay of container root filesystem: %w", err) + } + defer func() { + if err := overlay.Unmount(overlayTempDir); err != nil { + logger.Warnf("unmounting support for overlay of container root filesystem: %v", err) + } + }() + // Follow through on the overlay or bind mount, whatever the overlay package decided + // to leave to us to do. + rootfsMountOptions := strings.Join(rootfsMount.Options, ",") + logrus.Debugf("mounting %q to %q as %q with options %v", rootfsMount.Source, rootfsMount.Destination, rootfsMount.Type, rootfsMountOptions) + if err := mount.Mount(rootfsMount.Source, rootfsMount.Destination, rootfsMount.Type, rootfsMountOptions); err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("mounting overlay of container root filesystem: %w", err) + } + defer func() { + logrus.Debugf("unmounting %q", rootfsMount.Destination) + if err := mount.Unmount(rootfsMount.Destination); err != nil { + logger.Warnf("unmounting overlay of container root filesystem: %v", err) + } + }() + // Pretend that we didn't have to do any of the preceding. + rootfsPath = rootfsDir + + // Write extra content to the rootfs, creating intermediate directories if necessary. 
+ for location, content := range options.ExtraImageContent { + err := func() error { + if err := idtools.MkdirAllAndChownNew(filepath.Dir(filepath.Join(rootfsPath, location)), 0o755, idtools.IDPair{UID: int(st.UID()), GID: int(st.GID())}); err != nil { + return fmt.Errorf("ensuring %q is present in container root filesystem: %w", filepath.Dir(location), err) + } + output, err := os.OpenFile(filepath.Join(rootfsPath, location), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) + if err != nil { + return fmt.Errorf("preparing to write %q to container root filesystem: %w", location, err) + } + defer output.Close() + input, err := os.Open(content) + if err != nil { + return err + } + defer input.Close() + if _, err := io.Copy(output, input); err != nil { + return fmt.Errorf("copying contents of %q to %q in container root filesystem: %w", content, location, err) + } + if err := output.Chown(int(st.UID()), int(st.GID())); err != nil { + return fmt.Errorf("setting owner of %q in the container root filesystem: %w", location, err) + } + if err := output.Chmod(0o644); err != nil { + return fmt.Errorf("setting permissions on %q in the container root filesystem: %w", location, err) + } + return nil + }() + if err != nil { + return nil, WorkloadConfig{}, err + } + } + // Write part of the config blob where the krun init process will be // looking for it. The oci2cw tool used `buildah inspect` output, but // init is just looking for fields that have the right names in any // object, and the image's config will have that, so let's try encoding // it directly. - krunConfigPath := filepath.Join(path, ".krun_config.json") + krunConfigPath := filepath.Join(rootfsPath, ".krun_config.json") krunConfigBytes, err := json.Marshal(ociConfig) if err != nil { return nil, WorkloadConfig{}, fmt.Errorf("creating .krun_config from image configuration: %w", err) @@ -178,11 +289,6 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC if err := ioutils.AtomicWriteFile(krunConfigPath, krunConfigBytes, 0o600); err != nil { return nil, WorkloadConfig{}, fmt.Errorf("saving krun config: %w", err) } - defer func() { - if err := os.Remove(krunConfigPath); err != nil { - logger.Warnf("removing krun configuration file: %v", err) - } - }() // Encode the workload config, in case it fails for any reason. cleanedUpWorkloadConfig := workloadConfig @@ -213,7 +319,7 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC imageSize := slop(options.ImageSize, options.Slop) if imageSize == 0 { var sourceSize int64 - if err := filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error { + if err := filepath.WalkDir(rootfsPath, func(path string, d fs.DirEntry, err error) error { if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) { return err } @@ -261,7 +367,7 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC } // Format the disk image with the filesystem contents. 
- if _, stderr, err := MakeFS(path, plain.Name(), filesystem); err != nil { + if _, stderr, err := MakeFS(rootfsPath, plain.Name(), filesystem); err != nil { if strings.TrimSpace(stderr) != "" { return nil, WorkloadConfig{}, fmt.Errorf("%s: %w", strings.TrimSpace(stderr), err) } @@ -381,8 +487,8 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC tmpHeader.Name = "tmp/" tmpHeader.Typeflag = tar.TypeDir tmpHeader.Mode = 0o1777 - tmpHeader.Uname, workloadConfigHeader.Gname = "", "" - tmpHeader.Uid, workloadConfigHeader.Gid = 0, 0 + tmpHeader.Uname, tmpHeader.Gname = "", "" + tmpHeader.Uid, tmpHeader.Gid = 0, 0 tmpHeader.Size = 0 if err = tw.WriteHeader(tmpHeader); err != nil { logrus.Errorf("writing header for %q: %v", tmpHeader.Name, err) diff --git a/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint.gz b/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint.gz deleted file mode 100644 index 0e0035189..000000000 Binary files a/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint.gz and /dev/null differ diff --git a/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint_amd64.gz b/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint_amd64.gz new file mode 100644 index 000000000..947bed9b6 Binary files /dev/null and b/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint_amd64.gz differ diff --git a/vendor/github.com/containers/buildah/internal/mkcw/entrypoint.go b/vendor/github.com/containers/buildah/internal/mkcw/entrypoint.go index d72032168..c69a4a340 100644 --- a/vendor/github.com/containers/buildah/internal/mkcw/entrypoint.go +++ b/vendor/github.com/containers/buildah/internal/mkcw/entrypoint.go @@ -2,5 +2,5 @@ package mkcw import _ "embed" -//go:embed "embed/entrypoint.gz" +//go:embed "embed/entrypoint_amd64.gz" var entrypointCompressedBytes []byte diff --git a/vendor/github.com/containers/buildah/internal/sbom/merge.go b/vendor/github.com/containers/buildah/internal/sbom/merge.go new file mode 100644 index 000000000..dddea84c5 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/sbom/merge.go @@ -0,0 +1,296 @@ +package sbom + +import ( + "encoding/json" + "fmt" + "io" + "os" + "sort" + + "github.com/containers/buildah/define" +) + +// getComponentNameVersionPurl extracts the "name", "version", and "purl" +// fields of a CycloneDX component record +func getComponentNameVersionPurl(anyComponent any) (string, string, error) { + if component, ok := anyComponent.(map[string]any); ok { + // read the "name" field + anyName, ok := component["name"] + if !ok { + return "", "", fmt.Errorf("no name in component %v", anyComponent) + } + name, ok := anyName.(string) + if !ok { + return "", "", fmt.Errorf("name %v is not a string", anyName) + } + // read the optional "version" field + var version string + anyVersion, ok := component["version"] + if ok { + if version, ok = anyVersion.(string); !ok { + return "", "", fmt.Errorf("version %v is not a string", anyVersion) + } + } + // combine them + nameWithVersion := name + if version != "" { + nameWithVersion += ("@" + version) + } + // read the optional "purl" field + var purl string + anyPurl, ok := component["purl"] + if ok { + if purl, ok = anyPurl.(string); !ok { + return "", "", fmt.Errorf("purl %v is not a string", anyPurl) + } + } + return nameWithVersion, purl, nil + } + return "", "", fmt.Errorf("component %v is not an object", anyComponent) +} + +// getPackageNameVersionInfoPurl extracts the "name", 
"versionInfo", and "purl" +// fields of an SPDX package record +func getPackageNameVersionInfoPurl(anyPackage any) (string, string, error) { + if pkg, ok := anyPackage.(map[string]any); ok { + // read the "name" field + anyName, ok := pkg["name"] + if !ok { + return "", "", fmt.Errorf("no name in package %v", anyPackage) + } + name, ok := anyName.(string) + if !ok { + return "", "", fmt.Errorf("name %v is not a string", anyName) + } + // read the optional "versionInfo" field + var versionInfo string + if anyVersionInfo, ok := pkg["versionInfo"]; ok { + if versionInfo, ok = anyVersionInfo.(string); !ok { + return "", "", fmt.Errorf("versionInfo %v is not a string", anyVersionInfo) + } + } + // combine them + nameWithVersionInfo := name + if versionInfo != "" { + nameWithVersionInfo += ("@" + versionInfo) + } + // now look for optional externalRefs[].purl if "referenceCategory" + // is "PACKAGE-MANAGER" and "referenceType" is "purl" + var purl string + if anyExternalRefs, ok := pkg["externalRefs"]; ok { + if externalRefs, ok := anyExternalRefs.([]any); ok { + for _, anyExternalRef := range externalRefs { + if externalRef, ok := anyExternalRef.(map[string]any); ok { + anyReferenceCategory, ok := externalRef["referenceCategory"] + if !ok { + continue + } + if referenceCategory, ok := anyReferenceCategory.(string); !ok || referenceCategory != "PACKAGE-MANAGER" { + continue + } + anyReferenceType, ok := externalRef["referenceType"] + if !ok { + continue + } + if referenceType, ok := anyReferenceType.(string); !ok || referenceType != "purl" { + continue + } + if anyReferenceLocator, ok := externalRef["referenceLocator"]; ok { + if purl, ok = anyReferenceLocator.(string); !ok { + return "", "", fmt.Errorf("purl %v is not a string", anyReferenceLocator) + } + } + } + } + } + } + return nameWithVersionInfo, purl, nil + } + return "", "", fmt.Errorf("package %v is not an object", anyPackage) +} + +// getLicenseID extracts the "licenseId" field of an SPDX license record +func getLicenseID(anyLicense any) (string, error) { + var licenseID string + if lic, ok := anyLicense.(map[string]any); ok { + anyID, ok := lic["licenseId"] + if !ok { + return "", fmt.Errorf("no licenseId in license %v", anyID) + } + id, ok := anyID.(string) + if !ok { + return "", fmt.Errorf("licenseId %v is not a string", anyID) + } + licenseID = id + } + return licenseID, nil +} + +// mergeSlicesWithoutDuplicates merges a named slice in "base" with items from +// the same slice in "merge", so long as getKey() returns values for them that +// it didn't for items from the "base" slice +func mergeSlicesWithoutDuplicates(base, merge map[string]any, sliceField string, getKey func(record any) (string, error)) error { + uniqueKeys := make(map[string]struct{}) + // go through all of the values in the base slice, grab their + // keys, and note them + baseRecords := base[sliceField] + baseRecordsSlice, ok := baseRecords.([]any) + if !ok { + baseRecordsSlice = []any{} + } + for _, anyRecord := range baseRecordsSlice { + key, err := getKey(anyRecord) + if err != nil { + return err + } + uniqueKeys[key] = struct{}{} + } + // go through all of the record values in the merge doc, grab their + // associated keys, and append them to the base records slice if we + // haven't seen the key yet + mergeRecords := merge[sliceField] + mergeRecordsSlice, ok := mergeRecords.([]any) + if !ok { + mergeRecordsSlice = []any{} + } + for _, anyRecord := range mergeRecordsSlice { + key, err := getKey(anyRecord) + if err != nil { + return err + } + if _, present 
:= uniqueKeys[key]; !present { + baseRecordsSlice = append(baseRecordsSlice, anyRecord) + uniqueKeys[key] = struct{}{} + } + } + if len(baseRecordsSlice) > 0 { + base[sliceField] = baseRecordsSlice + } + return nil +} + +// decodeJSON decodes a file into a map +func decodeJSON(inputFile string, document *map[string]any) error { + src, err := os.Open(inputFile) + if err != nil { + return err + } + defer src.Close() + if err = json.NewDecoder(src).Decode(document); err != nil { + return fmt.Errorf("decoding JSON document from %q: %w", inputFile, err) + } + return nil +} + +// encodeJSON encodes a map and saves it to a file +func encodeJSON(outputFile string, document any) error { + dst, err := os.Create(outputFile) + if err != nil { + return err + } + defer dst.Close() + if err = json.NewEncoder(dst).Encode(document); err != nil { + return fmt.Errorf("writing JSON document to %q: %w", outputFile, err) + } + return nil +} + +// Merge adds the contents of inputSBOM to inputOutputSBOM using one of a +// handful of named strategies. +func Merge(mergeStrategy define.SBOMMergeStrategy, inputOutputSBOM, inputSBOM, outputPURL string) (err error) { + type purlImageContents struct { + Dependencies []string `json:"dependencies,omitempty"` + } + type purlDocument struct { + ImageContents purlImageContents `json:"image_contents,omitempty"` + } + purls := []string{} + seenPurls := make(map[string]struct{}) + + switch mergeStrategy { + case define.SBOMMergeStrategyCycloneDXByComponentNameAndVersion: + var base, merge map[string]any + if err = decodeJSON(inputOutputSBOM, &base); err != nil { + return fmt.Errorf("reading first SBOM to be merged from %q: %w", inputOutputSBOM, err) + } + if err = decodeJSON(inputSBOM, &merge); err != nil { + return fmt.Errorf("reading second SBOM to be merged from %q: %w", inputSBOM, err) + } + + // merge the "components" lists based on unique combinations of + // "name" and "version" fields, and save unique package URL + // values + err = mergeSlicesWithoutDuplicates(base, merge, "components", func(anyPackage any) (string, error) { + nameWithVersion, purl, err := getComponentNameVersionPurl(anyPackage) + if purl != "" { + if _, seen := seenPurls[purl]; !seen { + purls = append(purls, purl) + seenPurls[purl] = struct{}{} + } + } + return nameWithVersion, err + }) + if err != nil { + return fmt.Errorf("merging the %q field of CycloneDX SBOMs: %w", "components", err) + } + + // save the updated doc + err = encodeJSON(inputOutputSBOM, base) + + case define.SBOMMergeStrategySPDXByPackageNameAndVersionInfo: + var base, merge map[string]any + if err = decodeJSON(inputOutputSBOM, &base); err != nil { + return fmt.Errorf("reading first SBOM to be merged from %q: %w", inputOutputSBOM, err) + } + if err = decodeJSON(inputSBOM, &merge); err != nil { + return fmt.Errorf("reading second SBOM to be merged from %q: %w", inputSBOM, err) + } + + // merge the "packages" lists based on unique combinations of + // "name" and "versionInfo" fields, and save unique package URL + // values + err = mergeSlicesWithoutDuplicates(base, merge, "packages", func(anyPackage any) (string, error) { + nameWithVersionInfo, purl, err := getPackageNameVersionInfoPurl(anyPackage) + if purl != "" { + if _, seen := seenPurls[purl]; !seen { + purls = append(purls, purl) + seenPurls[purl] = struct{}{} + } + } + return nameWithVersionInfo, err + }) + if err != nil { + return fmt.Errorf("merging the %q field of SPDX SBOMs: %w", "packages", err) + } + + // merge the "hasExtractedLicensingInfos" lists based on unique + // 
"licenseId" values + err = mergeSlicesWithoutDuplicates(base, merge, "hasExtractedLicensingInfos", getLicenseID) + if err != nil { + return fmt.Errorf("merging the %q field of SPDX SBOMs: %w", "hasExtractedLicensingInfos", err) + } + + // save the updated doc + err = encodeJSON(inputOutputSBOM, base) + + case define.SBOMMergeStrategyCat: + dst, err := os.OpenFile(inputOutputSBOM, os.O_RDWR|os.O_APPEND, 0o644) + if err != nil { + return err + } + defer dst.Close() + src, err := os.Open(inputSBOM) + if err != nil { + return err + } + defer src.Close() + if _, err = io.Copy(dst, src); err != nil { + return err + } + } + if err == nil { + sort.Strings(purls) + err = encodeJSON(outputPURL, &purlDocument{purlImageContents{Dependencies: purls}}) + } + return err +} diff --git a/vendor/github.com/containers/buildah/internal/sbom/presets.go b/vendor/github.com/containers/buildah/internal/sbom/presets.go new file mode 100644 index 000000000..8f104139a --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/sbom/presets.go @@ -0,0 +1,65 @@ +package sbom + +import "github.com/containers/buildah/define" + +// Preset returns a predefined SBOMScanOptions structure that has the passed-in +// name as one of its "Type" values. +func Preset(name string) (preset *define.SBOMScanOptions, err error) { + // If you change these, make sure you update references in + // buildah-commit.1.md and buildah-build.1.md to match! + presets := []define.SBOMScanOptions{ + { + Type: []string{"", "syft", "syft-cyclonedx"}, + Image: "ghcr.io/anchore/syft", + Commands: []string{ + "/syft scan -q dir:{ROOTFS} --output cyclonedx-json={OUTPUT}", + "/syft scan -q dir:{CONTEXT} --output cyclonedx-json={OUTPUT}", + }, + // ImageSBOMOutput: "/root/buildinfo/content_manifests/sbom-cyclonedx.json", + // ImagePURLOutput: "/root/buildinfo/content_manifests/sbom-purl.json", + MergeStrategy: define.SBOMMergeStrategyCycloneDXByComponentNameAndVersion, + }, + { + Type: []string{"syft-spdx"}, + Image: "ghcr.io/anchore/syft", + Commands: []string{ + "/syft scan -q dir:{ROOTFS} --output spdx-json={OUTPUT}", + "/syft scan -q dir:{CONTEXT} --output spdx-json={OUTPUT}", + }, + // ImageSBOMOutput: "/root/buildinfo/content_manifests/sbom-spdx.json", + // ImagePURLOutput: "/root/buildinfo/content_manifests/sbom-purl.json", + MergeStrategy: define.SBOMMergeStrategySPDXByPackageNameAndVersionInfo, + }, + + { + Type: []string{"trivy", "trivy-cyclonedx"}, + Image: "ghcr.io/aquasecurity/trivy", + Commands: []string{ + "trivy filesystem -q {ROOTFS} --format cyclonedx --output {OUTPUT}", + "trivy filesystem -q {CONTEXT} --format cyclonedx --output {OUTPUT}", + }, + // ImageSBOMOutput: "/root/buildinfo/content_manifests/sbom-cyclonedx.json", + // ImagePURLOutput: "/root/buildinfo/content_manifests/sbom-purl.json", + MergeStrategy: define.SBOMMergeStrategyCycloneDXByComponentNameAndVersion, + }, + { + Type: []string{"trivy-spdx"}, + Image: "ghcr.io/aquasecurity/trivy", + Commands: []string{ + "trivy filesystem -q {ROOTFS} --format spdx-json --output {OUTPUT}", + "trivy filesystem -q {CONTEXT} --format spdx-json --output {OUTPUT}", + }, + // ImageSBOMOutput: "/root/buildinfo/content_manifests/sbom-spdx.json", + // ImagePURLOutput: "/root/buildinfo/content_manifests/sbom-purl.json", + MergeStrategy: define.SBOMMergeStrategySPDXByPackageNameAndVersionInfo, + }, + } + for _, preset := range presets { + for _, presetName := range preset.Type { + if presetName == name { + return &preset, nil + } + } + } + return nil, nil +} diff --git 
a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go index 01f4b1051..dbcaa2375 100644 --- a/vendor/github.com/containers/buildah/internal/util/util.go +++ b/vendor/github.com/containers/buildah/internal/util/util.go @@ -97,3 +97,8 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error { } return nil } + +func SetHas(m map[string]struct{}, k string) bool { + _, ok := m[k] + return ok +} diff --git a/vendor/github.com/containers/buildah/internal/volumes/volumes.go b/vendor/github.com/containers/buildah/internal/volumes/volumes.go index a79b8df8c..515f846f3 100644 --- a/vendor/github.com/containers/buildah/internal/volumes/volumes.go +++ b/vendor/github.com/containers/buildah/internal/volumes/volumes.go @@ -11,6 +11,7 @@ import ( "errors" + "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/internal" internalParse "github.com/containers/buildah/internal/parse" @@ -54,6 +55,7 @@ func CacheParent() string { return filepath.Join(tmpdir.GetTempDir(), buildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID())) } +// FIXME: this code needs to be merged with pkg/parse/parse.go ValidateVolumeOpts // GetBindMount parses a single bind mount entry from the --mount flag. // Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty. // Caller is expected to perform unmount of any mounted images @@ -69,8 +71,8 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st fromImage := "" for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + argName, argValue, hasArgValue := strings.Cut(val, "=") + switch argName { case "type": // This is already processed continue @@ -80,7 +82,7 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st case "ro", "nosuid", "nodev", "noexec": // TODO: detect duplication of these options. // (Is this necessary?) 
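
mergeSlicesWithoutDuplicates in the new internal/sbom/merge.go above is what lets a single helper deduplicate both CycloneDX "components" and SPDX "packages": the caller supplies a getKey function, and a record from the second document is appended only when its key has not already been seen. A stripped-down sketch of the pattern, with simplified types:

package main

import "fmt"

// mergeByKey appends records from extra onto base, skipping any record whose
// key is already present in base (or earlier in extra).
func mergeByKey(base, extra []map[string]any, getKey func(map[string]any) string) []map[string]any {
	seen := make(map[string]struct{}, len(base))
	for _, r := range base {
		seen[getKey(r)] = struct{}{}
	}
	for _, r := range extra {
		k := getKey(r)
		if _, ok := seen[k]; ok {
			continue
		}
		seen[k] = struct{}{}
		base = append(base, r)
	}
	return base
}

func main() {
	byNameVersion := func(r map[string]any) string {
		return fmt.Sprintf("%v@%v", r["name"], r["version"])
	}
	base := []map[string]any{{"name": "openssl", "version": "3.0"}}
	extra := []map[string]any{
		{"name": "openssl", "version": "3.0"}, // duplicate key, dropped
		{"name": "zlib", "version": "1.3"},    // new key, kept
	}
	fmt.Println(len(mergeByKey(base, extra, byNameVersion))) // prints 2
}
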
- newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) mountReadability = true case "rw", "readwrite": newMount.Options = append(newMount.Options, "rw") @@ -89,28 +91,31 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st // Alias for "ro" newMount.Options = append(newMount.Options, "ro") mountReadability = true - case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": - newMount.Options = append(newMount.Options, kv[0]) + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U", "no-dereference": + if hasArgValue { + return newMount, "", fmt.Errorf("%v: %w", val, errBadOptionArg) + } + newMount.Options = append(newMount.Options, argName) case "from": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg) } - fromImage = kv[1] + fromImage = argValue case "bind-propagation": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Options = append(newMount.Options, kv[1]) + newMount.Options = append(newMount.Options, argValue) case "src", "source": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Source = kv[1] + newMount.Source = argValue case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg) } - targetPath := kv[1] + targetPath := argValue if !path.IsAbs(targetPath) { targetPath = filepath.Join(workDir, targetPath) } @@ -124,23 +129,20 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st return newMount, "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg) } setRelabel = true - if len(kv) != 2 { - return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption) - } - switch kv[1] { + switch argValue { case "private": newMount.Options = append(newMount.Options, "Z") case "shared": newMount.Options = append(newMount.Options, "z") default: - return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption) + return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", argName, errBadMntOption) } case "consistency": // Option for OS X only, has no meaning on other platforms // and can thus be safely ignored. 
// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts default: - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption) + return newMount, "", fmt.Errorf("%v: %w", argName, errBadMntOption) } } @@ -188,7 +190,11 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st // buildkit parity: support absolute path for sources from current build context if contextDir != "" { // path should be /contextDir/specified path - newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source)) + evaluated, err := copier.Eval(contextDir, newMount.Source, copier.EvalOptions{}) + if err != nil { + return newMount, "", err + } + newMount.Source = evaluated } else { // looks like its coming from `build run --mount=type=bind` allow using absolute path // error out if no source is set @@ -234,25 +240,25 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a } // if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id id := "" - //buidkit parity: cache directory defaults to 755 + // buildkit parity: cache directory defaults to 755 mode = 0o755 - //buidkit parity: cache directory defaults to uid 0 if not specified + // buildkit parity: cache directory defaults to uid 0 if not specified uid := 0 - //buidkit parity: cache directory defaults to gid 0 if not specified + // buildkit parity: cache directory defaults to gid 0 if not specified gid := 0 // sharing mode sharing := "shared" for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + argName, argValue, hasArgValue := strings.Cut(val, "=") + switch argName { case "type": // This is already processed continue case "nosuid", "nodev", "noexec": // TODO: detect duplication of these options. // (Is this necessary?) 
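
The GetBindMount and GetCacheMount hunks above replace strings.SplitN(val, "=", 2) with strings.Cut, whose third return value makes "was there an '=' at all" explicit, so the repeated len(kv) == 1 checks collapse into a single hasArgValue flag. A short comparison of the two shapes:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, val := range []string{"src=/data", "ro"} {
		// Old shape: the slice length encodes whether '=' was present.
		kv := strings.SplitN(val, "=", 2)
		hadValue := len(kv) == 2

		// New shape: Cut reports the presence of '=' directly.
		name, value, hasValue := strings.Cut(val, "=")

		fmt.Println(kv[0], hadValue, "|", name, value, hasValue)
	}
}
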
- newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) case "rw", "readwrite": newMount.Options = append(newMount.Options, "rw") case "readonly", "ro": @@ -260,33 +266,33 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a newMount.Options = append(newMount.Options, "ro") setReadOnly = true case "Z", "z": - newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) foundSElinuxLabel = true case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U": - newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) setShared = true case "sharing": - sharing = kv[1] + sharing = argValue case "bind-propagation": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Options = append(newMount.Options, kv[1]) + newMount.Options = append(newMount.Options, argValue) case "id": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - id = kv[1] + id = argValue case "from": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - fromStage = kv[1] + fromStage = argValue case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - targetPath := kv[1] + targetPath := argValue if !path.IsAbs(targetPath) { targetPath = filepath.Join(workDir, targetPath) } @@ -296,36 +302,36 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a newMount.Destination = targetPath setDest = true case "src", "source": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Source = kv[1] + newMount.Source = argValue case "mode": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - mode, err = strconv.ParseUint(kv[1], 8, 32) + mode, err = strconv.ParseUint(argValue, 8, 32) if err != nil { return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err) } case "uid": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - uid, err = strconv.Atoi(kv[1]) + uid, err = strconv.Atoi(argValue) if err != nil { return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err) } case "gid": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - gid, err = strconv.Atoi(kv[1]) + gid, err = strconv.Atoi(argValue) if err != nil { return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err) } default: - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption) + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadMntOption) } } @@ 
-383,7 +389,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a UID: uid, GID: gid, } - //buildkit parity: change uid and gid if specified otheriwise keep `0` + // buildkit parity: change uid and gid if specified otheriwise keep `0` err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair) if err != nil { return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err) @@ -590,42 +596,42 @@ func GetTmpfsMount(args []string) (specs.Mount, error) { setDest := false for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + argName, argValue, hasArgValue := strings.Cut(val, "=") + switch argName { case "type": // This is already processed continue case "ro", "nosuid", "nodev", "noexec": - newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) case "readonly": // Alias for "ro" newMount.Options = append(newMount.Options, "ro") case "tmpcopyup": - //the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself. - newMount.Options = append(newMount.Options, kv[0]) + // the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself. + newMount.Options = append(newMount.Options, argName) case "tmpfs-mode": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) + newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", argValue)) case "tmpfs-size": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) + newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", argValue)) case "src", "source": return newMount, errors.New("source is not supported with tmpfs mounts") case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + if err := parse.ValidateVolumeCtrDir(argValue); err != nil { return newMount, err } - newMount.Destination = kv[1] + newMount.Destination = argValue setDest = true default: - return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption) + return newMount, fmt.Errorf("%v: %w", argName, errBadMntOption) } } diff --git a/vendor/github.com/containers/buildah/pkg/jail/jail.go b/vendor/github.com/containers/buildah/pkg/jail/jail.go index fdaca5af2..07651a598 100644 --- a/vendor/github.com/containers/buildah/pkg/jail/jail.go +++ b/vendor/github.com/containers/buildah/pkg/jail/jail.go @@ -4,10 +4,14 @@ package jail import ( + "fmt" + "strconv" "strings" + "sync" "syscall" "unsafe" + "github.com/containers/buildah/pkg/util" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -28,6 +32,11 @@ type config struct { params map[string]interface{} } +var ( + needVnetJailOnce sync.Once + needVnetJail bool +) + func NewConfig() *config { return &config{ params: make(map[string]interface{}), @@ -178,3 +187,60 @@ func (j *jail) Set(jconf *config) error { _, err := jailSet(jconf, JAIL_UPDATE) return err } + +func parseVersion(version string) (string, int, int, int, error) { + // 
Expected formats: + // .-RELEASE optionally followed by -p + // -STABLE + // -CURRENT + parts := strings.Split(string(version), "-") + if len(parts) < 2 || len(parts) > 3 { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + ver := strings.Split(parts[0], ".") + + if len(ver) != 2 { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + major, err := strconv.Atoi(ver[0]) + if err != nil { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + minor, err := strconv.Atoi(ver[1]) + if err != nil { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + patchlevel := 0 + if len(parts) == 3 { + if parts[1] != "RELEASE" || !strings.HasPrefix(parts[2], "p") { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + patchlevel, err = strconv.Atoi(strings.TrimPrefix(parts[2], "p")) + if err != nil { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + } + return parts[1], major, minor, patchlevel, nil +} + +// Return true if its necessary to have a separate jail to own the vnet. For +// FreeBSD 13.3 and later, we don't need a separate vnet jail since it is +// possible to configure the network without either attaching to the container's +// jail or trusting the ifconfig and route utilities in the container. If for +// any reason, we fail to parse the OS version, we default to returning true. +func NeedVnetJail() bool { + needVnetJailOnce.Do(func() { + // FreeBSD 13.3 and later have support for 'ifconfig -j' and 'route -j' + needVnetJail = true + version, err := util.ReadKernelVersion() + if err != nil { + logrus.Errorf("failed to determine OS version: %v", err) + return + } + _, major, minor, _, err := parseVersion(version) + if major > 13 || (major == 13 && minor > 2) { + needVnetJail = false + } + }) + return needVnetJail +} diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go index e416ecd78..666a0a01a 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go @@ -6,6 +6,7 @@ import ( "os/exec" "path/filepath" "strings" + "syscall" "errors" @@ -14,7 +15,6 @@ import ( "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) // Options type holds various configuration options for overlay @@ -113,10 +113,10 @@ func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphO // findMountProgram finds if any mount program is specified in the graph options. 
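
NeedVnetJail above computes its answer once under a sync.Once, caches it in a package-level variable, and deliberately defaults to the conservative answer (true) when the OS version cannot be parsed. The general memoized-capability-check shape looks roughly like this; probe is a hypothetical stand-in for the FreeBSD version parsing:

package main

import (
	"fmt"
	"sync"
)

var (
	capOnce sync.Once
	capOK   bool
)

// hasCapability reports whether some expensive-to-probe feature is present.
// The probe runs at most once per process; later calls reuse the cached value.
func hasCapability() bool {
	capOnce.Do(func() {
		capOK = probe()
	})
	return capOK
}

// probe is a placeholder; the real code reads and parses the kernel version.
func probe() bool { return true }

func main() {
	fmt.Println(hasCapability(), hasCapability()) // probe ran only once
}
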
func findMountProgram(graphOptions []string) string { - mountMap := map[string]bool{ - ".mount_program": true, - "overlay.mount_program": true, - "overlay2.mount_program": true, + mountMap := map[string]struct{}{ + ".mount_program": {}, + "overlay.mount_program": {}, + "overlay2.mount_program": {}, } for _, i := range graphOptions { @@ -126,7 +126,7 @@ func findMountProgram(graphOptions []string) string { } key := s[0] val := s[1] - if mountMap[key] { + if _, has := mountMap[key]; has { return val } } @@ -180,7 +180,7 @@ func Unmount(contentDir string) error { } // Ignore EINVAL as the specified merge dir is not a mount point - if err := unix.Unmount(mergeDir, 0); err != nil && !errors.Is(err, os.ErrNotExist) && err != unix.EINVAL { + if err := system.Unmount(mergeDir); err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, syscall.EINVAL) { return fmt.Errorf("unmount overlay %s: %w", mergeDir, err) } return nil diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go index e814a327c..b064ec578 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go @@ -18,6 +18,9 @@ import ( // But allows api to set custom workdir, upperdir and other overlay options // Following API is being used by podman at the moment func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { + if opts == nil { + opts = &Options{} + } if opts.ReadOnly { // Read-only overlay mounts can be simulated with nullfs mount.Source = source diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go index 9bd72bc24..46d0c44aa 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go @@ -17,6 +17,9 @@ import ( // But allows api to set custom workdir, upperdir and other overlay options // Following API is being used by podman at the moment func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { + if opts == nil { + opts = &Options{} + } mergeDir := filepath.Join(contentDir, "merge") // Create overlay mount options for rw/ro. diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay_unsupported.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay_unsupported.go new file mode 100644 index 000000000..538db65e0 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay_unsupported.go @@ -0,0 +1,20 @@ +//go:build !freebsd && !linux +// +build !freebsd,!linux + +package overlay + +import ( + "fmt" + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// MountWithOptions creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. 
+// But allows api to set custom workdir, upperdir and other overlay options +// Following API is being used by podman at the moment +func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, err error) { + return mount, fmt.Errorf("read/write overlay mounts not supported on %q", runtime.GOOS) +} diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index d865f5044..3ec612c9b 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -18,6 +18,7 @@ import ( "github.com/containers/buildah/define" mkcwtypes "github.com/containers/buildah/internal/mkcw/types" internalParse "github.com/containers/buildah/internal/parse" + "github.com/containers/buildah/internal/sbom" "github.com/containers/buildah/internal/tmpdir" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/common/pkg/auth" @@ -446,6 +447,60 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin return ctx, nil } +// PullPolicyFromOptions returns a PullPolicy that reflects the combination of +// the specified "pull" and undocumented "pull-always" and "pull-never" flags. +func PullPolicyFromOptions(c *cobra.Command) (define.PullPolicy, error) { + return PullPolicyFromFlagSet(c.Flags(), c.Flag) +} + +// PullPolicyFromFlagSet returns a PullPolicy that reflects the combination of +// the specified "pull" and undocumented "pull-always" and "pull-never" flags. +func PullPolicyFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (define.PullPolicy, error) { + pullFlagsCount := 0 + + if findFlagFunc("pull").Changed { + pullFlagsCount++ + } + if findFlagFunc("pull-always").Changed { + pullFlagsCount++ + } + if findFlagFunc("pull-never").Changed { + pullFlagsCount++ + } + + if pullFlagsCount > 1 { + return 0, errors.New("can only set one of 'pull' or 'pull-always' or 'pull-never'") + } + + // Allow for --pull, --pull=true, --pull=false, --pull=never, --pull=always + // --pull-always and --pull-never. The --pull-never and --pull-always options + // will not be documented. 
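
findMountProgram above (and isValidDeviceMode later in parse.go) move from map[string]bool to map[string]struct{}: the empty struct carries no payload, and membership is read with the two-value lookup instead of relying on a bool value that is indistinguishable from a missing key. In miniature:

package main

import "fmt"

func main() {
	allowed := map[string]struct{}{
		".mount_program":         {},
		"overlay.mount_program":  {},
		"overlay2.mount_program": {},
	}
	for _, key := range []string{"overlay.mount_program", "overlay.size"} {
		if _, has := allowed[key]; has {
			fmt.Println(key, "names a mount program")
		} else {
			fmt.Println(key, "is something else")
		}
	}
}
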
+ pullPolicy := define.PullIfMissing + pullFlagValue := findFlagFunc("pull").Value.String() + if strings.EqualFold(pullFlagValue, "true") || strings.EqualFold(pullFlagValue, "ifnewer") { + pullPolicy = define.PullIfNewer + } + pullAlwaysFlagValue, err := flags.GetBool("pull-always") + if err != nil { + return 0, err + } + if pullAlwaysFlagValue || strings.EqualFold(pullFlagValue, "always") { + pullPolicy = define.PullAlways + } + pullNeverFlagValue, err := flags.GetBool("pull-never") + if err != nil { + return 0, err + } + if pullNeverFlagValue || + strings.EqualFold(pullFlagValue, "never") || + strings.EqualFold(pullFlagValue, "false") { + pullPolicy = define.PullNever + } + logrus.Debugf("Pull Policy for pull [%v]", pullPolicy) + + return pullPolicy, nil +} + func getAuthFile(authfile string) string { if authfile != "" { absAuthfile, err := filepath.Abs(authfile) @@ -709,6 +764,76 @@ func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOpti return options, nil } +// SBOMScanOptions parses the build options from the cli +func SBOMScanOptions(c *cobra.Command) (*define.SBOMScanOptions, error) { + return SBOMScanOptionsFromFlagSet(c.Flags(), c.Flag) +} + +// SBOMScanOptionsFromFlagSet parses scan settings from the cli +func SBOMScanOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.SBOMScanOptions, error) { + preset, err := flags.GetString("sbom") + if err != nil { + return nil, fmt.Errorf("invalid value for --sbom: %w", err) + } + + options, err := sbom.Preset(preset) + if err != nil { + return nil, err + } + if options == nil { + return nil, fmt.Errorf("parsing --sbom flag: unrecognized preset name %q", preset) + } + image, err := flags.GetString("sbom-scanner-image") + if err != nil { + return nil, fmt.Errorf("invalid value for --sbom-scanner-image: %w", err) + } + commands, err := flags.GetStringArray("sbom-scanner-command") + if err != nil { + return nil, fmt.Errorf("invalid value for --sbom-scanner-command: %w", err) + } + mergeStrategy, err := flags.GetString("sbom-merge-strategy") + if err != nil { + return nil, fmt.Errorf("invalid value for --sbom-merge-strategy: %w", err) + } + + if image != "" || len(commands) > 0 || mergeStrategy != "" { + options = &define.SBOMScanOptions{ + Image: image, + Commands: append([]string{}, commands...), + MergeStrategy: define.SBOMMergeStrategy(mergeStrategy), + } + } + if options.ImageSBOMOutput, err = flags.GetString("sbom-image-output"); err != nil { + return nil, fmt.Errorf("invalid value for --sbom-image-output: %w", err) + } + if options.SBOMOutput, err = flags.GetString("sbom-output"); err != nil { + return nil, fmt.Errorf("invalid value for --sbom-output: %w", err) + } + if options.ImagePURLOutput, err = flags.GetString("sbom-image-purl-output"); err != nil { + return nil, fmt.Errorf("invalid value for --sbom-image-purl-output: %w", err) + } + if options.PURLOutput, err = flags.GetString("sbom-purl-output"); err != nil { + return nil, fmt.Errorf("invalid value for --sbom-purl-output: %w", err) + } + + if options.Image == "" || len(options.Commands) == 0 { + return options, fmt.Errorf("sbom configuration missing one or more of (%q or %q)", "--sbom-scanner-image", "--sbom-scanner-command") + } + if options.SBOMOutput == "" && options.ImageSBOMOutput == "" && options.PURLOutput == "" && options.ImagePURLOutput == "" { + return options, fmt.Errorf("sbom configuration missing one or more of (%q, %q, %q or %q)", "--sbom-output", "--sbom-image-output", "--sbom-purl-output", 
"--sbom-image-purl-output") + } + if len(options.Commands) > 1 && options.MergeStrategy == "" { + return options, fmt.Errorf("sbom configuration included multiple %q values but no %q value", "--sbom-scanner-command", "--sbom-merge-strategy") + } + switch options.MergeStrategy { + default: + return options, fmt.Errorf("sbom arguments included unrecognized merge strategy %q", string(options.MergeStrategy)) + case define.SBOMMergeStrategyCat, define.SBOMMergeStrategyCycloneDXByComponentNameAndVersion, define.SBOMMergeStrategySPDXByPackageNameAndVersionInfo: + // all good here + } + return options, nil +} + // IDMappingOptions parses the build options related to user namespaces and ID mapping. func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag) @@ -1053,19 +1178,19 @@ func Device(device string) (string, string, string, error) { // isValidDeviceMode checks if the mode for device is valid or not. // isValid mode is a composition of r (read), w (write), and m (mknod). func isValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, + var legalDeviceMode = map[rune]struct{}{ + 'r': {}, + 'w': {}, + 'm': {}, } if mode == "" { return false } for _, c := range mode { - if !legalDeviceMode[c] { + if _, has := legalDeviceMode[c]; !has { return false } - legalDeviceMode[c] = false + delete(legalDeviceMode, c) } return true } diff --git a/vendor/github.com/containers/buildah/run_common.go b/vendor/github.com/containers/buildah/run_common.go index 93550f62b..184391736 100644 --- a/vendor/github.com/containers/buildah/run_common.go +++ b/vendor/github.com/containers/buildah/run_common.go @@ -110,7 +110,7 @@ func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServe return "", err } - if err := label.Relabel(cfile, b.MountLabel, false); err != nil { + if err := relabel(cfile, b.MountLabel, false); err != nil { return "", err } return cfile, nil @@ -169,7 +169,7 @@ func (b *Builder) generateHosts(rdir string, chownOpts *idtools.IDPair, imageRoo if err = os.Chown(targetfile, uid, gid); err != nil { return "", err } - if err := label.Relabel(targetfile, b.MountLabel, false); err != nil { + if err := relabel(targetfile, b.MountLabel, false); err != nil { return "", err } @@ -198,7 +198,7 @@ func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDP if err = os.Chown(cfile, uid, gid); err != nil { return "", err } - if err := label.Relabel(cfile, b.MountLabel, false); err != nil { + if err := relabel(cfile, b.MountLabel, false); err != nil { return "", err } @@ -1410,7 +1410,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin if err = os.MkdirAll(volumePath, 0755); err != nil { return nil, err } - if err = label.Relabel(volumePath, mountLabel, false); err != nil { + if err = relabel(volumePath, mountLabel, false); err != nil { return nil, err } initializeVolume = true @@ -1750,7 +1750,7 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr return nil, "", err } - if err := label.Relabel(ctrFileOnHost, b.MountLabel, false); err != nil { + if err := relabel(ctrFileOnHost, b.MountLabel, false); err != nil { return nil, "", err } hostUID, hostGID, err := util.GetHostIDs(idMaps.uidmap, idMaps.gidmap, uid, gid) @@ -1848,13 +1848,13 @@ func (b *Builder) getSSHMount(tokens 
[]string, count int, sshsources map[string] return nil, nil, err } - if err := label.Relabel(filepath.Dir(hostSock), b.MountLabel, false); err != nil { + if err := relabel(filepath.Dir(hostSock), b.MountLabel, false); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } return nil, nil, err } - if err := label.Relabel(hostSock, b.MountLabel, false); err != nil { + if err := relabel(hostSock, b.MountLabel, false); err != nil { if shutdownErr := fwdAgent.Shutdown(); shutdownErr != nil { b.Logger.Errorf("error shutting down agent: %v", shutdownErr) } @@ -1959,3 +1959,13 @@ func setPdeathsig(cmd *exec.Cmd) { } cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL } + +func relabel(path, mountLabel string, recurse bool) error { + if err := label.Relabel(path, mountLabel, recurse); err != nil { + if !errors.Is(err, syscall.ENOTSUP) { + return err + } + logrus.Debugf("Labeling not supported on %q", path) + } + return nil +} diff --git a/vendor/github.com/containers/buildah/run_freebsd.go b/vendor/github.com/containers/buildah/run_freebsd.go index 9344876df..9e3ffad0b 100644 --- a/vendor/github.com/containers/buildah/run_freebsd.go +++ b/vendor/github.com/containers/buildah/run_freebsd.go @@ -25,7 +25,6 @@ import ( "github.com/containers/common/libnetwork/resolvconf" nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - cutil "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/stringid" @@ -34,6 +33,7 @@ import ( spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) @@ -156,7 +156,11 @@ func (b *Builder) Run(command []string, options RunOptions) error { containerName := Package + "-" + filepath.Base(path) if configureNetwork { - g.AddAnnotation("org.freebsd.parentJail", containerName+"-vnet") + if jail.NeedVnetJail() { + g.AddAnnotation("org.freebsd.parentJail", containerName+"-vnet") + } else { + g.AddAnnotation("org.freebsd.jail.vnet", "new") + } } homeDir, err := b.configureUIDGID(g, mountPoint, options) @@ -199,7 +203,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} hostFile := "" - if !options.NoHosts && !cutil.StringInSlice(config.DefaultHostsFile, volumes) && options.ConfigureNetwork != define.NetworkDisabled { + if !options.NoHosts && !slices.Contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled { hostFile, err = b.generateHosts(path, rootIDPair, mountPoint, spec) if err != nil { return err @@ -207,7 +211,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { bindFiles[config.DefaultHostsFile] = hostFile } - if !cutil.StringInSlice(resolvconf.DefaultResolvConf, volumes) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") { + if !slices.Contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") { resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, 
b.CommonBuildOpts.DNSOptions, nil) if err != nil { return err @@ -247,9 +251,11 @@ func (b *Builder) Run(command []string, options RunOptions) error { defer b.cleanupTempVolumes() - // If we are creating a network, make the vnet here so that we - // can execute the OCI runtime inside it. - if configureNetwork { + // If we are creating a network, make the vnet here so that we can + // execute the OCI runtime inside it. For FreeBSD-13.3 and later, we can + // configure the container network settings from outside the jail, which + // removes the need for a separate jail to manage the vnet. + if configureNetwork && jail.NeedVnetJail() { mynetns := containerName + "-vnet" jconf := jail.NewConfig() @@ -426,7 +432,12 @@ func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, optio } logrus.Debugf("configureNetworks: %v", configureNetworks) - mynetns := containerName + "-vnet" + var mynetns string + if jail.NeedVnetJail() { + mynetns = containerName + "-vnet" + } else { + mynetns = containerName + } networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks)) for i, network := range configureNetworks { diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index 5263abecc..03abaec5d 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -35,7 +35,6 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/hooks" hooksExec "github.com/containers/common/pkg/hooks/exec" - cutil "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/lockfile" @@ -44,8 +43,8 @@ import ( "github.com/docker/go-units" "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" - "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) @@ -262,7 +261,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)} hostFile := "" - if !options.NoHosts && !cutil.StringInSlice(config.DefaultHostsFile, volumes) && options.ConfigureNetwork != define.NetworkDisabled { + if !options.NoHosts && !slices.Contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled { hostFile, err = b.generateHosts(path, rootIDPair, mountPoint, spec) if err != nil { return err @@ -270,7 +269,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { bindFiles[config.DefaultHostsFile] = hostFile } - if !options.NoHostname && !(cutil.StringInSlice("/etc/hostname", volumes)) { + if !options.NoHostname && !(slices.Contains(volumes, "/etc/hostname")) { hostFile, err := b.generateHostname(path, spec.Hostname, rootIDPair) if err != nil { return err @@ -279,7 +278,7 @@ func (b *Builder) Run(command []string, options RunOptions) error { bindFiles["/etc/hostname"] = hostFile } - if !cutil.StringInSlice(resolvconf.DefaultResolvConf, volumes) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") { + if !slices.Contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") { 
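
The run_common.go changes above route every label.Relabel call through a new relabel helper that swallows ENOTSUP, so builds keep working on filesystems without SELinux xattr support. The wrapping pattern, using a stand-in labeling function since the real one needs an SELinux-enabled host:

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// tolerantRelabel calls the supplied labeling function and treats only
// "operation not supported" as a non-error, mirroring the helper above.
func tolerantRelabel(path string, relabelFn func(string) error) error {
	if err := relabelFn(path); err != nil {
		if !errors.Is(err, syscall.ENOTSUP) {
			return err
		}
		fmt.Printf("labeling not supported on %q, continuing\n", path)
	}
	return nil
}

func main() {
	noXattrs := func(string) error { return syscall.ENOTSUP } // stand-in labeler
	fmt.Println(tolerantRelabel("/mnt/vfat", noXattrs))       // <nil>
}
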
resolvFile, err := b.addResolvConf(path, rootIDPair, b.CommonBuildOpts.DNSServers, b.CommonBuildOpts.DNSSearch, b.CommonBuildOpts.DNSOptions, spec.Linux.Namespaces) if err != nil { return err @@ -310,7 +309,7 @@ rootless=%d if err = ioutils.AtomicWriteFile(containerenvPath, []byte(containerenv), 0755); err != nil { return err } - if err := label.Relabel(containerenvPath, b.MountLabel, false); err != nil { + if err := relabel(containerenvPath, b.MountLabel, false); err != nil { return err } @@ -855,6 +854,9 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) var ( ul *units.Ulimit err error + // setup rlimits + nofileSet bool + nprocSet bool ) ulimit = append(defaultUlimits, ulimit...) @@ -863,8 +865,39 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err) } + if strings.ToUpper(ul.Name) == "NOFILE" { + nofileSet = true + } + if strings.ToUpper(ul.Name) == "NPROC" { + nprocSet = true + } g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft)) } + if !nofileSet { + max := define.RLimitDefaultValue + var rlimit unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit); err == nil { + if max < rlimit.Max || unshare.IsRootless() { + max = rlimit.Max + } + } else { + logrus.Warnf("Failed to return RLIMIT_NOFILE ulimit %q", err) + } + g.AddProcessRlimits("RLIMIT_NOFILE", max, max) + } + if !nprocSet { + max := define.RLimitDefaultValue + var rlimit unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NPROC, &rlimit); err == nil { + if max < rlimit.Max || unshare.IsRootless() { + max = rlimit.Max + } + } else { + logrus.Warnf("Failed to return RLIMIT_NPROC ulimit %q", err) + } + g.AddProcessRlimits("RLIMIT_NPROC", max, max) + } + return nil } @@ -916,12 +949,12 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, options = append(options, "rw") } if foundz { - if err := label.Relabel(host, mountLabel, true); err != nil { + if err := relabel(host, mountLabel, true); err != nil { return specs.Mount{}, err } } if foundZ { - if err := label.Relabel(host, mountLabel, false); err != nil { + if err := relabel(host, mountLabel, false); err != nil { return specs.Mount{}, err } } diff --git a/vendor/github.com/containers/buildah/scan.go b/vendor/github.com/containers/buildah/scan.go new file mode 100644 index 000000000..16b53d855 --- /dev/null +++ b/vendor/github.com/containers/buildah/scan.go @@ -0,0 +1,295 @@ +package buildah + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/internal/sbom" + "github.com/mattn/go-shellwords" + rspec "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" +) + +func stringSliceReplaceAll(slice []string, replacements map[string]string, important []string) (built []string, replacedAnImportantValue bool) { + built = make([]string, 0, len(slice)) + for i := range slice { + element := slice[i] + for from, to := range replacements { + previous := element + if element = strings.ReplaceAll(previous, from, to); element != previous { + if len(important) == 0 || slices.Contains(important, from) { + replacedAnImportantValue = true + } + } + } + built = append(built, element) + } + return built, replacedAnImportantValue +} + +// sbomScan iterates through the scanning configuration settings, generating +// 
SBOM files and storing them either in the rootfs or in a local file path. +func (b *Builder) sbomScan(ctx context.Context, options CommitOptions) (imageFiles, localFiles map[string]string, scansDir string, err error) { + // We'll use a temporary per-container directory for this one. + cdir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return nil, nil, "", err + } + scansDir, err = os.MkdirTemp(cdir, "buildah-scan") + if err != nil { + return nil, nil, "", err + } + defer func() { + if err != nil { + if err := os.RemoveAll(scansDir); err != nil { + logrus.Warnf("removing temporary directory %q: %v", scansDir, err) + } + } + }() + + // We may be producing sets of outputs using temporary containers, and + // there's no need to create more than one container for any one + // specific scanner image. + scanners := make(map[string]*Builder) + defer func() { + for _, scanner := range scanners { + scannerID := scanner.ContainerID + if err := scanner.Delete(); err != nil { + logrus.Warnf("removing temporary scanner container %q: %v", scannerID, err) + } + } + }() + + // Just assume that every scanning method will be looking at the rootfs. + rootfs, err := b.Mount(b.MountLabel) + if err != nil { + return nil, nil, "", err + } + defer func(b *Builder) { + if err := b.Unmount(); err != nil { + logrus.Warnf("unmounting temporary scanner container %q: %v", b.ContainerID, err) + } + }(b) + + // Iterate through all of the scanning strategies. + for _, scanSpec := range options.SBOMScanOptions { + // Pull the image and create a container we can run the scanner + // in, unless we've done that already for this scanner image. + scanBuilder, ok := scanners[scanSpec.Image] + if !ok { + builderOptions := BuilderOptions{ + FromImage: scanSpec.Image, + ContainerSuffix: "scanner", + PullPolicy: scanSpec.PullPolicy, + BlobDirectory: options.BlobDirectory, + Logger: b.Logger, + SystemContext: options.SystemContext, + MountLabel: b.MountLabel, + ProcessLabel: b.ProcessLabel, + IDMappingOptions: &b.IDMappingOptions, + } + if scanBuilder, err = NewBuilder(ctx, b.store, builderOptions); err != nil { + return nil, nil, "", fmt.Errorf("creating temporary working container to run scanner: %w", err) + } + scanners[scanSpec.Image] = scanBuilder + } + // Now figure out which commands we need to run. First, try to + // parse a command ourselves, because syft's image (at least) + // doesn't include a shell. Build a slice of command slices. + var commands [][]string + for _, commandSpec := range scanSpec.Commands { + // Start by assuming it's shell -c $whatever. + parsedCommand := []string{"/bin/sh", "-c", commandSpec} + if shell := scanBuilder.Shell(); len(shell) != 0 { + parsedCommand = append(append([]string{}, shell...), commandSpec) + } + if !strings.ContainsAny(commandSpec, "<>|") { // An imperfect check for shell redirection being used. + // If we can parse it ourselves, though, prefer to use that result, + // in case the scanner image doesn't include a shell. + if parsed, err := shellwords.Parse(commandSpec); err == nil { + parsedCommand = parsed + } + } + commands = append(commands, parsedCommand) + } + // Set up a list of mounts for the rootfs and whichever context + // directories we're told were used. + const rootfsTargetDir = "/.rootfs" + const scansTargetDir = "/.scans" + const contextsTargetDirPrefix = "/.context" + runMounts := []rspec.Mount{ + // Our temporary directory, read-write. 
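
The addRlimits hunk in run_linux.go above gives RLIMIT_NOFILE and RLIMIT_NPROC explicit defaults when --ulimit did not set them: start from define.RLimitDefaultValue and raise it to the process's current hard limit when that is higher, or always use the hard limit when rootless. Roughly:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// defaultLimit starts from a baseline and raises it to the current hard
// limit when that is higher, or whenever running rootless, where the
// baseline might exceed what the kernel will grant.
func defaultLimit(resource int, baseline uint64, rootless bool) uint64 {
	var rl unix.Rlimit
	if err := unix.Getrlimit(resource, &rl); err != nil {
		return baseline // on error, fall back to the baseline
	}
	if baseline < rl.Max || rootless {
		return rl.Max
	}
	return baseline
}

func main() {
	// 1<<20 stands in for define.RLimitDefaultValue here.
	fmt.Println(defaultLimit(unix.RLIMIT_NOFILE, 1<<20, false))
}
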
+ { + Type: define.TypeBind, + Source: scansDir, + Destination: scansTargetDir, + Options: []string{"rw", "z"}, + }, + // The rootfs, read-only. + { + Type: define.TypeBind, + Source: rootfs, + Destination: rootfsTargetDir, + Options: []string{"ro"}, + }, + } + // Each context directory, also read-only. + for i := range scanSpec.ContextDir { + contextMount := rspec.Mount{ + Type: define.TypeBind, + Source: scanSpec.ContextDir[i], + Destination: fmt.Sprintf("%s%d", contextsTargetDirPrefix, i), + Options: []string{"ro"}, + } + runMounts = append(runMounts, contextMount) + } + // Set up run options and mounts one time, and reuse it. + runOptions := RunOptions{ + Logger: b.Logger, + Isolation: b.Isolation, + SystemContext: options.SystemContext, + Mounts: runMounts, + } + // We'll have to do some text substitutions so that we run the + // right commands, in the right order, pointing at the right + // mount points. + var resolvedCommands [][]string + var resultFiles []string + for _, command := range commands { + // Each command gets to produce its own file that we'll + // combine later if there's more than one of them. + contextDirScans := 0 + for i := range scanSpec.ContextDir { + resultFile := filepath.Join(scansTargetDir, fmt.Sprintf("scan%d.json", len(resultFiles))) + // If the command mentions {CONTEXT}... + resolvedCommand, scansContext := stringSliceReplaceAll(command, + map[string]string{ + "{CONTEXT}": fmt.Sprintf("%s%d", contextsTargetDirPrefix, i), + "{OUTPUT}": resultFile, + }, + []string{"{CONTEXT}"}, + ) + if !scansContext { + break + } + // ... resolve the path references and add it to the list of commands. + resolvedCommands = append(resolvedCommands, resolvedCommand) + resultFiles = append(resultFiles, resultFile) + contextDirScans++ + } + if contextDirScans == 0 { + resultFile := filepath.Join(scansTargetDir, fmt.Sprintf("scan%d.json", len(resultFiles))) + // If the command didn't mention {CONTEXT}, but does mention {ROOTFS}... + resolvedCommand, scansRootfs := stringSliceReplaceAll(command, + map[string]string{ + "{ROOTFS}": rootfsTargetDir, + "{OUTPUT}": resultFile, + }, + []string{"{ROOTFS}"}, + ) + // ... resolve the path references and add that to the list of commands. + if scansRootfs { + resolvedCommands = append(resolvedCommands, resolvedCommand) + resultFiles = append(resultFiles, resultFile) + } + } + } + // Run all of the commands, one after the other, producing one + // or more files named "scan%d.json" in our temporary directory. + for _, resolvedCommand := range resolvedCommands { + logrus.Debugf("Running scan command %q", resolvedCommand) + if err = scanBuilder.Run(resolvedCommand, runOptions); err != nil { + return nil, nil, "", fmt.Errorf("running scanning command %v: %w", resolvedCommand, err) + } + } + // Produce the combined output files that we need to create, if there are any. 
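
The command-resolution loop above rewrites scanner command templates with stringSliceReplaceAll and treats a substitution as significant only when an "important" placeholder such as {CONTEXT} or {ROOTFS} actually changed something, which is how commands that never mention a context directory fall through to the rootfs variant. A condensed sketch of that substitution step:

package main

import (
	"fmt"
	"strings"
)

// substitute applies every from->to pair to each element and reports whether
// a replacement from the important list actually changed something.
func substitute(cmd []string, repl map[string]string, important []string) ([]string, bool) {
	out := make([]string, 0, len(cmd))
	significant := false
	for _, elem := range cmd {
		for from, to := range repl {
			prev := elem
			if elem = strings.ReplaceAll(prev, from, to); elem != prev {
				for _, imp := range important {
					if imp == from {
						significant = true
					}
				}
			}
		}
		out = append(out, elem)
	}
	return out, significant
}

func main() {
	cmd := []string{"/syft", "scan", "-q", "dir:{ROOTFS}", "--output", "cyclonedx-json={OUTPUT}"}
	resolved, scansRootfs := substitute(cmd,
		map[string]string{"{ROOTFS}": "/.rootfs", "{OUTPUT}": "/.scans/scan0.json"},
		[]string{"{ROOTFS}"})
	fmt.Println(scansRootfs) // true: this command does look at the rootfs
	fmt.Println(resolved)
}
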
+ var sbomResult, purlResult string + switch { + case scanSpec.ImageSBOMOutput != "": + sbomResult = filepath.Join(scansDir, filepath.Base(scanSpec.ImageSBOMOutput)) + case scanSpec.SBOMOutput != "": + sbomResult = filepath.Join(scansDir, filepath.Base(scanSpec.SBOMOutput)) + default: + sbomResult = filepath.Join(scansDir, "sbom-result") + } + switch { + case scanSpec.ImagePURLOutput != "": + purlResult = filepath.Join(scansDir, filepath.Base(scanSpec.ImagePURLOutput)) + case scanSpec.PURLOutput != "": + purlResult = filepath.Join(scansDir, filepath.Base(scanSpec.PURLOutput)) + default: + purlResult = filepath.Join(scansDir, "purl-result") + } + copyFile := func(destination, source string) error { + dst, err := os.Create(destination) + if err != nil { + return err + } + defer dst.Close() + src, err := os.Open(source) + if err != nil { + return err + } + defer src.Close() + if _, err = io.Copy(dst, src); err != nil { + return fmt.Errorf("copying %q to %q: %w", source, destination, err) + } + return nil + } + err = func() error { + for i := range resultFiles { + thisResultFile := filepath.Join(scansDir, filepath.Base(resultFiles[i])) + switch i { + case 0: + // Straight-up copy to create the first version of the final output. + if err = copyFile(sbomResult, thisResultFile); err != nil { + return err + } + // This shouldn't change any contents, but lets us generate the purl file. + err = sbom.Merge(scanSpec.MergeStrategy, thisResultFile, sbomResult, purlResult) + default: + // Hopefully we know how to merge information from the new one into the final output. + err = sbom.Merge(scanSpec.MergeStrategy, sbomResult, thisResultFile, purlResult) + } + } + return err + }() + if err != nil { + return nil, nil, "", err + } + // If these files are supposed to be written to the local filesystem, add + // their contents to the map of files we expect our caller to write. + if scanSpec.SBOMOutput != "" || scanSpec.PURLOutput != "" { + if localFiles == nil { + localFiles = make(map[string]string) + } + if scanSpec.SBOMOutput != "" { + localFiles[scanSpec.SBOMOutput] = sbomResult + } + if scanSpec.PURLOutput != "" { + localFiles[scanSpec.PURLOutput] = purlResult + } + } + // If these files are supposed to be written to the image, create a map of + // their contents so that we can either create a layer diff for them (or + // slipstream them into a squashed layer diff) later. 
+ if scanSpec.ImageSBOMOutput != "" || scanSpec.ImagePURLOutput != "" { + if imageFiles == nil { + imageFiles = make(map[string]string) + } + if scanSpec.ImageSBOMOutput != "" { + imageFiles[scanSpec.ImageSBOMOutput] = sbomResult + } + if scanSpec.ImagePURLOutput != "" { + imageFiles[scanSpec.ImagePURLOutput] = purlResult + } + } + } + return imageFiles, localFiles, scansDir, nil +} diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go index bec861483..b72353af2 100644 --- a/vendor/github.com/containers/buildah/util/util.go +++ b/vendor/github.com/containers/buildah/util/util.go @@ -9,13 +9,11 @@ import ( "path/filepath" "sort" "strings" - "sync" "syscall" "github.com/containers/buildah/define" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/util" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/pkg/shortnames" "github.com/containers/image/v5/signature" @@ -26,6 +24,7 @@ import ( "github.com/opencontainers/go-digest" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) const ( @@ -45,9 +44,9 @@ var ( } ) -// StringInSlice is deprecated, use github.com/containers/common/pkg/util.StringInSlice +// StringInSlice is deprecated, use golang.org/x/exp/slices.Contains func StringInSlice(s string, slice []string) bool { - return util.StringInSlice(s, slice) + return slices.Contains(slice, s) } // resolveName checks if name is a valid image name, and if that name doesn't @@ -244,7 +243,7 @@ func Runtime() string { conf, err := config.Default() if err != nil { - logrus.Warnf("Error loading container config when searching for local runtime: %v", err) + logrus.Warnf("Error loading default container config when searching for local runtime: %v", err) return define.DefaultRuntime } return conf.Engine.OCIRuntime @@ -376,12 +375,6 @@ func TruncateString(str string, to int) string { return newStr } -var ( - isUnifiedOnce sync.Once - isUnified bool - isUnifiedErr error -) - // fileExistsAndNotADir - Check to see if a file exists // and that it is not a directory. func fileExistsAndNotADir(path string) (bool, error) { diff --git a/vendor/github.com/containers/buildah/util/util_linux.go b/vendor/github.com/containers/buildah/util/util_linux.go deleted file mode 100644 index cca1f9e7e..000000000 --- a/vendor/github.com/containers/buildah/util/util_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -package util - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. -func IsCgroup2UnifiedMode() (bool, error) { - isUnifiedOnce.Do(func() { - var st syscall.Statfs_t - if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { - isUnified, isUnifiedErr = false, err - } else { - isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil - } - }) - return isUnified, isUnifiedErr -} diff --git a/vendor/github.com/containers/common/internal/attributedstring/slice.go b/vendor/github.com/containers/common/internal/attributedstring/slice.go index ad4acc5ec..298d468d5 100644 --- a/vendor/github.com/containers/common/internal/attributedstring/slice.go +++ b/vendor/github.com/containers/common/internal/attributedstring/slice.go @@ -42,8 +42,8 @@ func (a *Slice) Set(values []string) { } // UnmarshalTOML is the custom unmarshal method for Slice. 
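The util.go hunk above reduces buildah's deprecated `StringInSlice` to a thin wrapper over `slices.Contains` from `golang.org/x/exp/slices`. Since Go 1.21 the same API is also available from the standard library's `slices` package, so new callers can avoid the extra dependency; a small standalone example:

```go
package main

import (
	"fmt"
	"slices" // standard library as of Go 1.21; the vendored code still imports golang.org/x/exp/slices
)

func main() {
	runtimes := []string{"crun", "runc", "kata"}
	fmt.Println(slices.Contains(runtimes, "runc"))  // true
	fmt.Println(slices.Contains(runtimes, "youki")) // false
}
```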
-func (a *Slice) UnmarshalTOML(data interface{}) error { - iFaceSlice, ok := data.([]interface{}) +func (a *Slice) UnmarshalTOML(data any) error { + iFaceSlice, ok := data.([]any) if !ok { return fmt.Errorf("unable to cast to interface array: %v", data) } @@ -53,7 +53,7 @@ func (a *Slice) UnmarshalTOML(data interface{}) error { switch val := x.(type) { case string: // Strings are directly appended to the slice. loadedStrings = append(loadedStrings, val) - case map[string]interface{}: // The attribute struct is represented as a map. + case map[string]any: // The attribute struct is represented as a map. for k, v := range val { // Iterate over all _supported_ keys. switch k { case "append": @@ -81,16 +81,15 @@ func (a *Slice) UnmarshalTOML(data interface{}) error { // MarshalTOML is the custom marshal method for Slice. func (a *Slice) MarshalTOML() ([]byte, error) { - iFaceSlice := make([]interface{}, 0, len(a.Values)) + iFaceSlice := make([]any, 0, len(a.Values)) for _, x := range a.Values { iFaceSlice = append(iFaceSlice, x) } if a.Attributes.Append != nil { - Attributes := make(map[string]any) - Attributes["append"] = *a.Attributes.Append - iFaceSlice = append(iFaceSlice, Attributes) + attributes := map[string]any{"append": *a.Attributes.Append} + iFaceSlice = append(iFaceSlice, attributes) } buf := new(bytes.Buffer) diff --git a/vendor/github.com/containers/common/internal/deepcopy.go b/vendor/github.com/containers/common/internal/deepcopy.go new file mode 100644 index 000000000..157f6ee4c --- /dev/null +++ b/vendor/github.com/containers/common/internal/deepcopy.go @@ -0,0 +1,29 @@ +package internal + +import ( + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) + +// DeepCopyDescriptor copies a Descriptor, deeply copying its contents +func DeepCopyDescriptor(original *v1.Descriptor) *v1.Descriptor { + tmp := *original + if original.URLs != nil { + tmp.URLs = slices.Clone(original.URLs) + } + if original.Annotations != nil { + tmp.Annotations = maps.Clone(original.Annotations) + } + if original.Data != nil { + tmp.Data = slices.Clone(original.Data) + } + if original.Platform != nil { + tmpPlatform := *original.Platform + if original.Platform.OSFeatures != nil { + tmpPlatform.OSFeatures = slices.Clone(original.Platform.OSFeatures) + } + tmp.Platform = &tmpPlatform + } + return &tmp +} diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index d6acc7325..ebd76b386 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -364,11 +363,13 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere defer cancel() defer timer.Stop() - fmt.Fprintf(c.imageCopyOptions.ReportWriter, - "Pulling image %s inside systemd: setting pull timeout to %s\n", - source.StringWithinTransport(), - time.Duration(numExtensions)*extension, - ) + if c.imageCopyOptions.ReportWriter != nil { + fmt.Fprintf(c.imageCopyOptions.ReportWriter, + "Pulling image %s inside systemd: setting pull timeout to %s\n", + source.StringWithinTransport(), + time.Duration(numExtensions)*extension, + ) + } // From `man systemd.service(5)`: // @@ -431,12 +432,12 @@ func (c *copier) copy(ctx context.Context, source, destination types.ImageRefere // Sanity checks for Buildah. 
if sourceInsecure != nil && *sourceInsecure { if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse { - return nil, fmt.Errorf("can't require tls verification on an insecured registry") + return nil, errors.New("can't require tls verification on an insecured registry") } } if destinationInsecure != nil && *destinationInsecure { if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse { - return nil, fmt.Errorf("can't require tls verification on an insecured registry") + return nil, errors.New("can't require tls verification on an insecured registry") } } @@ -516,8 +517,8 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err return nil, fmt.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources) } - for _, inseureDomain := range sources.InsecureRegistries { - if inseureDomain == reference.Domain(dref) { + for _, insecureDomain := range sources.InsecureRegistries { + if insecureDomain == reference.Domain(dref) { insecure := true return &insecure, nil } diff --git a/vendor/github.com/containers/common/libimage/define/manifests.go b/vendor/github.com/containers/common/libimage/define/manifests.go index 1e02984b2..c59a58f70 100644 --- a/vendor/github.com/containers/common/libimage/define/manifests.go +++ b/vendor/github.com/containers/common/libimage/define/manifests.go @@ -4,24 +4,28 @@ import ( "github.com/containers/image/v5/manifest" ) -// ManifestListDescriptor references a platform-specific manifest. -// Contains exclusive field like `annotations` which is only present in -// OCI spec and not in docker image spec. +// ManifestListDescriptor describes a manifest that is mentioned in an +// image index or manifest list. +// Contains a subset of the fields which are present in both the OCI spec and +// the Docker spec, along with some which are unique to one or the other. type ManifestListDescriptor struct { manifest.Schema2Descriptor - Platform manifest.Schema2PlatformSpec `json:"platform"` - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` + Platform manifest.Schema2PlatformSpec `json:"platform,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + ArtifactType string `json:"artifactType,omitempty"` + Data []byte `json:"data,omitempty"` + Files []string `json:"files,omitempty"` } // ManifestListData is a list of platform-specific manifests, specifically used to // generate output struct for `podman manifest inspect`. Reason for maintaining and -// having this type is to ensure we can have a common type which contains exclusive +// having this type is to ensure we can have a single type which contains exclusive // fields from both Docker manifest format and OCI manifest format. type ManifestListData struct { SchemaVersion int `json:"schemaVersion"` MediaType string `json:"mediaType"` + ArtifactType string `json:"artifactType,omitempty"` Manifests []ManifestListDescriptor `json:"manifests"` - // Annotations contains arbitrary metadata for the image index. 
-	Annotations map[string]string `json:"annotations,omitempty"`
+	Subject     *ManifestListDescriptor `json:"subject,omitempty"`
+	Annotations map[string]string       `json:"annotations,omitempty"`
 }
diff --git a/vendor/github.com/containers/common/libimage/disk_usage.go b/vendor/github.com/containers/common/libimage/disk_usage.go
index 765b0df86..6264b25ec 100644
--- a/vendor/github.com/containers/common/libimage/disk_usage.go
+++ b/vendor/github.com/containers/common/libimage/disk_usage.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package libimage
 
@@ -37,7 +36,7 @@ func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, int64, error
 		return nil, -1, err
 	}
 
-	layerTree, err := r.layerTree(images)
+	layerTree, err := r.layerTree(ctx, images)
 	if err != nil {
 		return nil, -1, err
 	}
@@ -80,7 +79,7 @@ func (r *Runtime) DiskUsage(ctx context.Context) ([]ImageDiskUsage, int64, error
 
 // diskUsageForImage returns the disk-usage statistics for the specified image.
 func diskUsageForImage(ctx context.Context, image *Image, tree *layerTree) ([]ImageDiskUsage, error) {
-	if err := image.isCorrupted(""); err != nil {
+	if err := image.isCorrupted(ctx, ""); err != nil {
 		return nil, err
 	}
 
diff --git a/vendor/github.com/containers/common/libimage/events.go b/vendor/github.com/containers/common/libimage/events.go
index 5d82efa6a..5071d2549 100644
--- a/vendor/github.com/containers/common/libimage/events.go
+++ b/vendor/github.com/containers/common/libimage/events.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package libimage
 
@@ -19,6 +18,8 @@ const (
 	EventTypeUnknown EventType = iota
 	// EventTypeImagePull represents an image pull.
 	EventTypeImagePull
+	// EventTypeImagePullError represents a failed image pull.
+	EventTypeImagePullError
 	// EventTypeImagePush represents an image push.
 	EventTypeImagePush
 	// EventTypeImageRemove represents an image removal.
@@ -47,6 +48,8 @@ type Event struct {
 	Time time.Time
 	// Type of the event.
 	Type EventType
+	// Error in case of failure.
+	Error error
 }
 
 // writeEvent writes the specified event to the Runtime's event channel. The
diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go
index b51853af1..f0cf2e5b3 100644
--- a/vendor/github.com/containers/common/libimage/filters.go
+++ b/vendor/github.com/containers/common/libimage/filters.go
@@ -1,10 +1,10 @@
 //go:build !remote
-// +build !remote
 
 package libimage
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"path"
 	"strconv"
@@ -21,33 +21,28 @@ import (
 // indicates that the image matches the criteria.
 type filterFunc func(*Image) (bool, error)
 
-// Apply the specified filters. At least one filter of each key must apply.
-func (i *Image) applyFilters(filters map[string][]filterFunc) (bool, error) {
-	matches := false
-	for key := range filters { // and
-		matches = false
-		for _, filter := range filters[key] { // or
-			var err error
-			matches, err = filter(i)
+// Apply the specified filters. All filters of each key must apply.
+func (i *Image) applyFilters(ctx context.Context, filters map[string][]filterFunc) (bool, error) {
+	for key := range filters {
+		for _, filter := range filters[key] {
+			matches, err := filter(i)
 			if err != nil {
 				// Some images may have been corrupted in the
 				// meantime, so do an extra check and make the
 				// error non-fatal (see containers/podman/issues/12582).
- if errCorrupted := i.isCorrupted(""); errCorrupted != nil { + if errCorrupted := i.isCorrupted(ctx, ""); errCorrupted != nil { logrus.Errorf(errCorrupted.Error()) return false, nil } return false, err } - if matches { - break + // If any filter within a group doesn't match, return false + if !matches { + return false, nil } } - if !matches { - return false, nil - } } - return matches, nil + return true, nil } // filterImages returns a slice of images which are passing all specified @@ -63,7 +58,7 @@ func (r *Runtime) filterImages(ctx context.Context, images []*Image, options *Li } result := []*Image{} for i := range images { - match, err := images[i].applyFilters(filters) + match, err := images[i].applyFilters(ctx, filters) if err != nil { return nil, err } @@ -84,7 +79,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp var tree *layerTree getTree := func() (*layerTree, error) { if tree == nil { - t, err := r.layerTree(nil) + t, err := r.layerTree(ctx, nil) if err != nil { return nil, err } @@ -93,6 +88,7 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp return tree, nil } + var wantedReferenceMatches, unwantedReferenceMatches []string filters := map[string][]filterFunc{} duplicate := map[string]string{} for _, f := range options.Filters { @@ -184,7 +180,12 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp filter = filterManifest(ctx, manifest) case "reference": - filter = filterReferences(r, value) + if negate { + unwantedReferenceMatches = append(unwantedReferenceMatches, value) + } else { + wantedReferenceMatches = append(wantedReferenceMatches, value) + } + continue case "until": until, err := r.until(value) @@ -202,6 +203,11 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp filters[key] = append(filters[key], filter) } + // reference filters is a special case as it does an OR for positive matches + // and an AND logic for negative matches + filter := filterReferences(r, wantedReferenceMatches, unwantedReferenceMatches) + filters["reference"] = append(filters["reference"], filter) + return filters, nil } @@ -221,7 +227,7 @@ func (r *Runtime) containers(duplicate map[string]string, key, value string, ext case "false", "true": case "external": if externalFunc == nil { - return fmt.Errorf("libimage error: external containers filter without callback") + return errors.New("libimage error: external containers filter without callback") } default: return fmt.Errorf("unsupported value %q for containers filter", value) @@ -273,55 +279,97 @@ func filterManifest(ctx context.Context, value bool) filterFunc { } } -// filterReferences creates a reference filter for matching the specified value. 
-func filterReferences(r *Runtime, value string) filterFunc {
-	lookedUp, _, _ := r.LookupImage(value, nil)
+// filterReferences creates a reference filter for matching the specified wantedReferenceMatches value (OR logic)
+// and for matching the unwantedReferenceMatches values (AND logic)
+func filterReferences(r *Runtime, wantedReferenceMatches, unwantedReferenceMatches []string) filterFunc {
 	return func(img *Image) (bool, error) {
-		if lookedUp != nil {
-			if lookedUp.ID() == img.ID() {
+		// Empty reference filters, return true
+		if len(wantedReferenceMatches) == 0 && len(unwantedReferenceMatches) == 0 {
+			return true, nil
+		}
+
+		unwantedMatched := false
+		// Go through the unwanted matches first
+		for _, value := range unwantedReferenceMatches {
+			matches, err := imageMatchesReferenceFilter(r, img, value)
+			if err != nil {
+				return false, err
+			}
+			if matches {
+				unwantedMatched = true
+			}
+		}
+
+		// If there are no wanted match filters, then return false for the image
+		// that matched the unwanted value otherwise return true
+		if len(wantedReferenceMatches) == 0 {
+			return !unwantedMatched, nil
+		}
+
+		// Go through the wanted matches
+		// If an image matches the wanted filter but it also matches the unwanted
+		// filter, don't add it to the output
+		for _, value := range wantedReferenceMatches {
+			matches, err := imageMatchesReferenceFilter(r, img, value)
+			if err != nil {
+				return false, err
+			}
+			if matches && !unwantedMatched {
 				return true, nil
 			}
 		}
-		refs, err := img.NamesReferences()
-		if err != nil {
-			return false, err
+		return false, nil
+	}
+}
+
+// imageMatchesReferenceFilter returns true if an image matches the filter value given
+func imageMatchesReferenceFilter(r *Runtime, img *Image, value string) (bool, error) {
+	lookedUp, _, _ := r.LookupImage(value, nil)
+	if lookedUp != nil {
+		if lookedUp.ID() == img.ID() {
+			return true, nil
 		}
+	}
 
-		for _, ref := range refs {
-			refString := ref.String() // FQN with tag/digest
-			candidates := []string{refString}
+	refs, err := img.NamesReferences()
+	if err != nil {
+		return false, err
+	}
 
-			// Split the reference into 3 components (twice if digested/tagged):
-			// 1) Fully-qualified reference
-			// 2) Without domain
-			// 3) Without domain and path
-			if named, isNamed := ref.(reference.Named); isNamed {
+	for _, ref := range refs {
+		refString := ref.String() // FQN with tag/digest
+		candidates := []string{refString}
+
+		// Split the reference into 3 components (twice if digested/tagged):
+		// 1) Fully-qualified reference
+		// 2) Without domain
+		// 3) Without domain and path
+		if named, isNamed := ref.(reference.Named); isNamed {
+			candidates = append(candidates,
+				reference.Path(named), // path/name without tag/digest (Path() removes it)
+				refString[strings.LastIndex(refString, "/")+1:]) // name with tag/digest
+
+			trimmedString := reference.TrimNamed(named).String()
+			if refString != trimmedString {
+				tagOrDigest := refString[len(trimmedString):]
 				candidates = append(candidates,
-					reference.Path(named), // path/name without tag/digest (Path() removes it)
-					refString[strings.LastIndex(refString, "/")+1:]) // name with tag/digest
-
-				trimmedString := reference.TrimNamed(named).String()
-				if refString != trimmedString {
-					tagOrDigest := refString[len(trimmedString):]
-					candidates = append(candidates,
-						trimmedString, // FQN without tag/digest
+					reference.Path(named)+tagOrDigest, // path/name with tag/digest
+					trimmedString[strings.LastIndex(trimmedString, "/")+1:]) // name without tag/digest
 			}
+		}
 
-			for _, candidate := range candidates {
-				// path.Match() is also used by Docker's reference.FamiliarMatch().
-				matched, _ := path.Match(value, candidate)
-				if matched {
-					return true, nil
-				}
+		for _, candidate := range candidates {
+			// path.Match() is also used by Docker's reference.FamiliarMatch().
+			matched, _ := path.Match(value, candidate)
+			if matched {
+				return true, nil
 			}
 		}
-
-		return false, nil
 	}
+
+	return false, nil
 }
 
 // filterLabel creates a label for matching the specified value.
diff --git a/vendor/github.com/containers/common/libimage/history.go b/vendor/github.com/containers/common/libimage/history.go
index ccd810962..56f84e37a 100644
--- a/vendor/github.com/containers/common/libimage/history.go
+++ b/vendor/github.com/containers/common/libimage/history.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package libimage
 
@@ -26,7 +25,7 @@ func (i *Image) History(ctx context.Context) ([]ImageHistory, error) {
 		return nil, err
 	}
 
-	layerTree, err := i.runtime.layerTree(nil)
+	layerTree, err := i.runtime.layerTree(ctx, nil)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go
index 4d106d42f..9cc77cdb2 100644
--- a/vendor/github.com/containers/common/libimage/image.go
+++ b/vendor/github.com/containers/common/libimage/image.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package libimage
 
@@ -67,7 +66,7 @@ type Image struct {
 	}
 }
 
-// reload the image and pessimitically clear all cached data.
+// reload the image and pessimistically clear all cached data.
 func (i *Image) reload() error {
 	logrus.Tracef("Reloading image %s", i.ID())
 	img, err := i.runtime.store.Image(i.ID())
@@ -85,7 +84,7 @@ func (i *Image) reload() error {
 }
 
 // isCorrupted returns an error if the image may be corrupted.
-func (i *Image) isCorrupted(name string) error {
+func (i *Image) isCorrupted(ctx context.Context, name string) error {
 	// If it's a manifest list, we're good for now.
 	if _, err := i.getManifestList(); err == nil {
 		return nil
@@ -96,7 +95,7 @@ func (i *Image) isCorrupted(name string) error {
 		return err
 	}
 
-	img, err := ref.NewImage(context.Background(), nil)
+	img, err := ref.NewImage(ctx, nil)
 	if err != nil {
 		if name == "" {
 			name = i.ID()[:12]
@@ -258,7 +257,7 @@ func (i *Image) TopLayer() string {
 
 // Parent returns the parent image or nil if there is none
 func (i *Image) Parent(ctx context.Context) (*Image, error) {
-	tree, err := i.runtime.layerTree(nil)
+	tree, err := i.runtime.layerTree(ctx, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -292,7 +291,7 @@ func (i *Image) Children(ctx context.Context) ([]*Image, error) {
 // created for this invocation only.
 func (i *Image) getChildren(ctx context.Context, all bool, tree *layerTree) ([]*Image, error) {
 	if tree == nil {
-		t, err := i.runtime.layerTree(nil)
+		t, err := i.runtime.layerTree(ctx, nil)
 		if err != nil {
 			return nil, err
 		}
@@ -611,7 +610,7 @@ func (i *Image) Untag(name string) error {
 	}
 
 	// FIXME: this is breaking Podman CI but must be re-enabled once
-	// c/storage supports alterting the digests of an image. Then,
+	// c/storage supports altering the digests of an image. Then,
 	// Podman will do the right thing.
 	//
 	// !!! Also make sure to re-enable the tests !!!
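The rewritten `filterReferences`/`imageMatchesReferenceFilter` pair above changes the semantics of `--filter reference=...`: wanted patterns are OR-ed together, while negated patterns veto any match. The decision logic can be exercised in isolation; in this sketch a plain glob matcher stands in for `imageMatchesReferenceFilter`, which additionally consults the image store:

```go
package main

import (
	"fmt"
	"path"
)

// matchesReference is a stand-in for imageMatchesReferenceFilter: it
// glob-matches one candidate name the way path.Match() is used above.
func matchesReference(imageName, pattern string) bool {
	ok, _ := path.Match(pattern, imageName)
	return ok
}

// referenceFilter mirrors the combined semantics: any unwanted hit
// disqualifies the image, any wanted hit (absent an unwanted one) qualifies it.
func referenceFilter(imageName string, wanted, unwanted []string) bool {
	if len(wanted) == 0 && len(unwanted) == 0 {
		return true
	}
	for _, pattern := range unwanted {
		if matchesReference(imageName, pattern) {
			return false
		}
	}
	if len(wanted) == 0 {
		return true
	}
	for _, pattern := range wanted {
		if matchesReference(imageName, pattern) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(referenceFilter("quay.io/podman/stable", []string{"quay.io/*/*"}, nil))                    // true
	fmt.Println(referenceFilter("quay.io/podman/stable", []string{"quay.io/*/*"}, []string{"*/podman/*"})) // false
	fmt.Println(referenceFilter("docker.io/library/alpine", nil, []string{"quay.io/*"}))                   // true
}
```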
@@ -1031,7 +1030,7 @@ func getImageID(ctx context.Context, src types.ImageReference, sys *types.System // - 2) a bool indicating whether architecture, os or variant were set (some callers need that to decide whether they need to throw an error) // - 3) a fatal error that occurred prior to check for matches (e.g., storage errors etc.) func (i *Image) matchesPlatform(ctx context.Context, os, arch, variant string) (error, bool, error) { - if err := i.isCorrupted(""); err != nil { + if err := i.isCorrupted(ctx, ""); err != nil { return err, false, nil } inspectInfo, err := i.inspectInfo(ctx) diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go index 9f5841fe1..cd4ed3c4e 100644 --- a/vendor/github.com/containers/common/libimage/image_config.go +++ b/vendor/github.com/containers/common/libimage/image_config.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libimage/image_tree.go b/vendor/github.com/containers/common/libimage/image_tree.go index 8143d3779..8b9d1f4c4 100644 --- a/vendor/github.com/containers/common/libimage/image_tree.go +++ b/vendor/github.com/containers/common/libimage/image_tree.go @@ -1,9 +1,9 @@ //go:build !remote -// +build !remote package libimage import ( + "context" "fmt" "strings" @@ -38,7 +38,7 @@ func (i *Image) Tree(traverseChildren bool) (string, error) { fmt.Fprintf(sb, "No Image Layers") } - layerTree, err := i.runtime.layerTree(nil) + layerTree, err := i.runtime.layerTree(context.Background(), nil) if err != nil { return "", err } @@ -53,7 +53,7 @@ func (i *Image) Tree(traverseChildren bool) (string, error) { return tree.Print(), nil } - // Walk all layers of the image and assemlbe their data. Note that the + // Walk all layers of the image and assemble their data. Note that the // tree is constructed in reverse order to remain backwards compatible // with Podman. contents := []string{} diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index 5519f02ba..552c48eae 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libimage/inspect.go b/vendor/github.com/containers/common/libimage/inspect.go index 1003b6483..0db94708d 100644 --- a/vendor/github.com/containers/common/libimage/inspect.go +++ b/vendor/github.com/containers/common/libimage/inspect.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -180,22 +179,26 @@ func (i *Image) Inspect(ctx context.Context, options *InspectOptions) (*ImageDat } // Docker image - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema2MediaType: + case manifest.DockerV2Schema2MediaType: rawConfig, err := i.rawConfigBlob(ctx) if err != nil { return nil, err } - var dockerManifest manifest.Schema2V1Image - if err := json.Unmarshal(rawConfig, &dockerManifest); err != nil { + var dockerConfig manifest.Schema2V1Image + if err := json.Unmarshal(rawConfig, &dockerConfig); err != nil { return nil, err } - data.Comment = dockerManifest.Comment + data.Comment = dockerConfig.Comment // NOTE: Health checks may be listed in the container config or // the config. 
- data.HealthCheck = dockerManifest.ContainerConfig.Healthcheck - if data.HealthCheck == nil && dockerManifest.Config != nil { - data.HealthCheck = dockerManifest.Config.Healthcheck + data.HealthCheck = dockerConfig.ContainerConfig.Healthcheck + if data.HealthCheck == nil && dockerConfig.Config != nil { + data.HealthCheck = dockerConfig.Config.Healthcheck } + + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: + // There seem to be at least _some_ images with .Healthcheck set in schema1 (possibly just as an artifact + // of testing format conversion?), so this could plausibly read these values. } if data.Annotations == nil { diff --git a/vendor/github.com/containers/common/libimage/layer_tree.go b/vendor/github.com/containers/common/libimage/layer_tree.go index 71eafb0e7..66829c46b 100644 --- a/vendor/github.com/containers/common/libimage/layer_tree.go +++ b/vendor/github.com/containers/common/libimage/layer_tree.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -92,14 +91,14 @@ func (l *layerNode) repoTags() ([]string, error) { // layerTree extracts a layerTree from the layers in the local storage and // relates them to the specified images. -func (r *Runtime) layerTree(images []*Image) (*layerTree, error) { +func (r *Runtime) layerTree(ctx context.Context, images []*Image) (*layerTree, error) { layers, err := r.store.Layers() if err != nil { return nil, err } if images == nil { - images, err = r.ListImages(context.Background(), nil, nil) + images, err = r.ListImages(ctx, nil, nil) if err != nil { return nil, err } @@ -150,7 +149,9 @@ func (t *layerTree) layersOf(image *Image) []*storage.Layer { var layers []*storage.Layer node := t.node(image.TopLayer()) for node != nil { - layers = append(layers, node.layer) + if node.layer != nil { + layers = append(layers, node.layer) + } node = node.parent } return layers diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go index 36283a99b..2be6a1b64 100644 --- a/vendor/github.com/containers/common/libimage/load.go +++ b/vendor/github.com/containers/common/libimage/load.go @@ -1,10 +1,10 @@ //go:build !remote -// +build !remote package libimage import ( "context" + "errors" "fmt" "os" "time" @@ -92,7 +92,7 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( // Give a decent error message if nothing above worked. 
// we want the colon here for the multiline error //nolint:revive - loadError := fmt.Errorf("payload does not match any of the supported image formats:") + loadError := errors.New("payload does not match any of the supported image formats:") for _, err := range loadErrors { loadError = fmt.Errorf("%v\n * %v", loadError, err) } diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index c36bfda96..d7ee5e6b6 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -19,6 +18,9 @@ import ( "github.com/containers/storage" structcopier "github.com/jinzhu/copier" "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) // NOTE: the abstractions and APIs here are a first step to further merge @@ -222,22 +224,78 @@ func (i *Image) IsManifestList(ctx context.Context) (bool, error) { // Inspect returns a dockerized version of the manifest list. func (m *ManifestList) Inspect() (*define.ManifestListData, error) { inspectList := define.ManifestListData{} + // Copy the fields from the Docker-format version of the list. dockerFormat := m.list.Docker() err := structcopier.Copy(&inspectList, &dockerFormat) if err != nil { return &inspectList, err } - // Get missing annotation field from OCIv1 Spec - // and populate inspect data. + // Get OCI-specific fields from the OCIv1-format version of the list + // and copy them to the inspect data. ociFormat := m.list.OCIv1() + inspectList.ArtifactType = ociFormat.ArtifactType inspectList.Annotations = ociFormat.Annotations for i, manifest := range ociFormat.Manifests { inspectList.Manifests[i].Annotations = manifest.Annotations + inspectList.Manifests[i].ArtifactType = manifest.ArtifactType + if manifest.URLs != nil { + inspectList.Manifests[i].URLs = slices.Clone(manifest.URLs) + } + inspectList.Manifests[i].Data = manifest.Data + inspectList.Manifests[i].Files, err = m.list.Files(manifest.Digest) + if err != nil { + return &inspectList, err + } + } + if ociFormat.Subject != nil { + platform := ociFormat.Subject.Platform + if platform == nil { + platform = &imgspecv1.Platform{} + } + var osFeatures []string + if platform.OSFeatures != nil { + osFeatures = slices.Clone(platform.OSFeatures) + } + inspectList.Subject = &define.ManifestListDescriptor{ + Platform: manifest.Schema2PlatformSpec{ + OS: platform.OS, + Architecture: platform.Architecture, + OSVersion: platform.OSVersion, + Variant: platform.Variant, + OSFeatures: osFeatures, + }, + Schema2Descriptor: manifest.Schema2Descriptor{ + MediaType: ociFormat.Subject.MediaType, + Digest: ociFormat.Subject.Digest, + Size: ociFormat.Subject.Size, + URLs: ociFormat.Subject.URLs, + }, + Annotations: ociFormat.Subject.Annotations, + ArtifactType: ociFormat.Subject.ArtifactType, + Data: ociFormat.Subject.Data, + } + } + // Set MediaType to mirror the value we'd use when saving the list + // using defaults, instead of forcing it to one or the other by + // using the value from one version or the other that we explicitly + // requested above. 
+	serialized, err := m.list.Serialize("")
+	if err != nil {
+		return &inspectList, err
+	}
+	var typed struct {
+		MediaType string `json:"mediaType,omitempty"`
+	}
+	if err := json.Unmarshal(serialized, &typed); err != nil {
+		return &inspectList, err
+	}
+	if typed.MediaType != "" {
+		inspectList.MediaType = typed.MediaType
+	}
 	return &inspectList, nil
 }
 
-// Options for adding a manifest list.
+// Options for adding an image or artifact to a manifest list.
 type ManifestListAddOptions struct {
 	// Add all images to the list if the to-be-added image itself is a
 	// manifest list.
@@ -314,7 +372,105 @@ func (m *ManifestList) Add(ctx context.Context, name string, options *ManifestLi
 	return newDigest, nil
 }
 
-// Options for annotationg a manifest list.
+// Options for creating an artifact manifest for one or more files and adding
+// the artifact manifest to a manifest list.
+type ManifestListAddArtifactOptions struct {
+	// The artifactType to set in the artifact manifest.
+	Type *string `json:"artifact_type"`
+	// The mediaType to set in the config.MediaType field in the artifact manifest.
+	ConfigType string `json:"artifact_config_type"`
+	// Content to point to from the config field in the artifact manifest.
+	Config string `json:"artifact_config"`
+	// The mediaType to set in the layer descriptors in the artifact manifest.
+	LayerType string `json:"artifact_layer_type"`
+	// Whether or not to suppress the org.opencontainers.image.title annotation in layer descriptors.
+	ExcludeTitles bool `json:"exclude_layer_titles"`
+	// Annotations to set in the artifact manifest.
+	Annotations map[string]string `json:"annotations"`
+	// Subject to set in the artifact manifest.
+	Subject string `json:"subject"`
+}
+
+// AddArtifact creates an artifact manifest for the given files, adds it to
+// the manifest list, and returns the digest of the new instance.
+func (m *ManifestList) AddArtifact(ctx context.Context, options *ManifestListAddArtifactOptions, files ...string) (digest.Digest, error) { + if options == nil { + options = &ManifestListAddArtifactOptions{} + } + opts := manifests.AddArtifactOptions{ + ManifestArtifactType: options.Type, + Annotations: maps.Clone(options.Annotations), + ExcludeTitles: options.ExcludeTitles, + } + if options.ConfigType != "" { + opts.ConfigDescriptor = &imgspecv1.Descriptor{ + MediaType: options.ConfigType, + Digest: imgspecv1.DescriptorEmptyJSON.Digest, + Size: imgspecv1.DescriptorEmptyJSON.Size, + Data: slices.Clone(imgspecv1.DescriptorEmptyJSON.Data), + } + } + if options.Config != "" { + if opts.ConfigDescriptor == nil { + opts.ConfigDescriptor = &imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + } + } + opts.ConfigDescriptor.Digest = digest.FromString(options.Config) + opts.ConfigDescriptor.Size = int64(len(options.Config)) + opts.ConfigDescriptor.Data = slices.Clone([]byte(options.Config)) + } + if opts.ConfigDescriptor == nil { + empty := imgspecv1.DescriptorEmptyJSON + opts.ConfigDescriptor = &empty + } + if options.LayerType != "" { + opts.LayerMediaType = &options.LayerType + } + if options.Subject != "" { + ref, err := alltransports.ParseImageName(options.Subject) + if err != nil { + withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), options.Subject) + ref, err = alltransports.ParseImageName(withDocker) + if err != nil { + image, _, err := m.image.runtime.LookupImage(options.Subject, &LookupImageOptions{ManifestList: true}) + if err != nil { + return "", fmt.Errorf("locating subject for artifact manifest: %w", err) + } + ref = image.storageReference + } + } + opts.SubjectReference = ref + } + + // Lock the image record where this list lives. + locker, err := manifests.LockerForImage(m.image.runtime.store, m.ID()) + if err != nil { + return "", err + } + locker.Lock() + defer locker.Unlock() + + systemContext := m.image.runtime.systemContextCopy() + + // Make sure to reload the image from the containers storage to fetch + // the latest data (e.g., new or delete digests). + if err := m.reload(); err != nil { + return "", err + } + newDigest, err := m.list.AddArtifact(ctx, systemContext, opts, files...) + if err != nil { + return "", err + } + + // Write the changes to disk. + if err := m.saveAndReload(); err != nil { + return "", err + } + return newDigest, nil +} + +// Options for annotating a manifest list. type ManifestListAnnotateOptions struct { // Add the specified annotations to the added image. Annotations map[string]string @@ -330,10 +486,16 @@ type ManifestListAnnotateOptions struct { OSVersion string // Add the specified variant to the added image. Variant string + // Add the specified annotations to the index itself. + IndexAnnotations map[string]string + // Set the subject to which the index refers. + Subject string } // Annotate an image instance specified by `d` in the manifest list. 
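For callers, the flow above makes attaching an arbitrary file to an index a single call once the list has been looked up. A hedged caller sketch, assuming an already-constructed `*libimage.Runtime` and its `LookupManifestList` helper; the artifact and layer media types here are invented placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/common/libimage"
)

// addSBOMArtifact attaches an SBOM file to an existing manifest list.
// This is an illustrative sketch, not code from this diff.
func addSBOMArtifact(ctx context.Context, rt *libimage.Runtime, listName, sbomPath string) error {
	list, err := rt.LookupManifestList(listName)
	if err != nil {
		return err
	}
	artifactType := "application/vnd.example.sbom" // assumption: caller-chosen type
	opts := &libimage.ManifestListAddArtifactOptions{
		Type:      &artifactType,
		LayerType: "application/spdx+json", // assumption: matches the file's format
	}
	instanceDigest, err := list.AddArtifact(ctx, opts, sbomPath)
	if err != nil {
		return err
	}
	fmt.Println("added artifact manifest", instanceDigest)
	return nil
}
```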
func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAnnotateOptions) error { + ctx := context.Background() + if options == nil { return nil } @@ -373,6 +535,54 @@ func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAn return err } } + if len(options.IndexAnnotations) > 0 { + if err := m.list.SetAnnotations(nil, options.IndexAnnotations); err != nil { + return err + } + } + if options.Subject != "" { + ref, err := alltransports.ParseImageName(options.Subject) + if err != nil { + withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), options.Subject) + ref, err = alltransports.ParseImageName(withDocker) + if err != nil { + image, _, err := m.image.runtime.LookupImage(options.Subject, &LookupImageOptions{ManifestList: true}) + if err != nil { + return fmt.Errorf("locating subject for image index: %w", err) + } + ref = image.storageReference + } + } + src, err := ref.NewImageSource(ctx, &m.image.runtime.systemContext) + if err != nil { + return err + } + defer src.Close() + subjectManifestBytes, subjectManifestType, err := src.GetManifest(ctx, nil) + if err != nil { + return err + } + subjectManifestDigest, err := manifest.Digest(subjectManifestBytes) + if err != nil { + return err + } + var subjectArtifactType string + if !manifest.MIMETypeIsMultiImage(subjectManifestType) { + var subjectManifest imgspecv1.Manifest + if json.Unmarshal(subjectManifestBytes, &subjectManifest) == nil { + subjectArtifactType = subjectManifest.ArtifactType + } + } + descriptor := &imgspecv1.Descriptor{ + MediaType: subjectManifestType, + ArtifactType: subjectArtifactType, + Digest: subjectManifestDigest, + Size: int64(len(subjectManifestBytes)), + } + if err := m.list.SetSubject(descriptor); err != nil { + return err + } + } // Write the changes to disk. 
 	return m.saveAndReload()
diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go
index d28ac87bb..78349b503 100644
--- a/vendor/github.com/containers/common/libimage/manifests/manifests.go
+++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go
@@ -1,13 +1,21 @@
 package manifests
 
 import (
+	"bytes"
 	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
+	"mime"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
 	"time"
 
+	"github.com/containers/common/internal"
 	"github.com/containers/common/pkg/manifests"
 	"github.com/containers/common/pkg/retry"
 	"github.com/containers/common/pkg/supplemented"
@@ -15,6 +23,7 @@ import (
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/image"
 	"github.com/containers/image/v5/manifest"
+	ocilayout "github.com/containers/image/v5/oci/layout"
 	"github.com/containers/image/v5/pkg/compression"
 	"github.com/containers/image/v5/signature"
 	"github.com/containers/image/v5/signature/signer"
@@ -23,17 +32,25 @@ import (
 	"github.com/containers/image/v5/transports/alltransports"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage"
+	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/lockfile"
 	digest "github.com/opencontainers/go-digest"
+	imgspec "github.com/opencontainers/image-spec/specs-go"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/exp/maps"
+	"golang.org/x/exp/slices"
 )
 
 const (
 	defaultMaxRetries = 3
 )
 
-const instancesData = "instances.json"
+const (
+	instancesData                = "instances.json"
+	artifactsData                = "artifacts.json"
+	pushingArtifactsSubdirectory = "referenced-artifacts"
+)
 
 // LookupReferenceFunc return an image reference based on the specified one.
 // The returned reference can return custom ImageSource or ImageDestination
@@ -45,9 +62,19 @@ type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, e
 // for a List that has not yet been saved to an image.
 var ErrListImageUnknown = errors.New("unable to determine which image holds the manifest list")
 
+type artifactsDetails struct {
+	Manifests map[digest.Digest]string          `json:"manifests,omitempty"` // artifact (and other?) manifest digests → manifest contents
+	Files     map[digest.Digest][]string        `json:"files,omitempty"`     // artifact (and other?) manifest digests → file paths (mainly for display)
+	Configs   map[digest.Digest]digest.Digest   `json:"config,omitempty"`    // artifact (and other?) manifest digests → referenced config digests
+	Layers    map[digest.Digest][]digest.Digest `json:"layers,omitempty"`    // artifact (and other?) manifest digests → referenced layer digests
+	Detached  map[digest.Digest]string          `json:"detached,omitempty"`  // "config" and "layer" (and other?) digests in (usually artifact) manifests → file paths
+	Blobs     map[digest.Digest][]byte          `json:"blobs,omitempty"`     // "config" and "layer" (and other?) manifest digests → inlined blob contents
+}
 
 type list struct {
 	manifests.List
-	instances map[digest.Digest]string
+	instances map[digest.Digest]string // instance manifest digests → image references
+	artifacts artifactsDetails
 }
 
 // List is a manifest list or image index, either created using Create(), or
@@ -58,6 +85,9 @@ type List interface {
 	Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error)
 	Push(ctx context.Context, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error)
 	Add(ctx context.Context, sys *types.SystemContext, ref types.ImageReference, all bool) (digest.Digest, error)
+	AddArtifact(ctx context.Context, sys *types.SystemContext, options AddArtifactOptions, files ...string) (digest.Digest, error)
+	InstanceByFile(file string) (digest.Digest, error)
+	Files(instanceDigest digest.Digest) ([]string, error)
 }
 
 // PushOptions includes various settings which are needed for pushing the
@@ -93,6 +123,14 @@ func Create() List {
 	return &list{
 		List:      manifests.Create(),
 		instances: make(map[digest.Digest]string),
+		artifacts: artifactsDetails{
+			Manifests: make(map[digest.Digest]string),
+			Files:     make(map[digest.Digest][]string),
+			Configs:   make(map[digest.Digest]digest.Digest),
+			Layers:    make(map[digest.Digest][]digest.Digest),
+			Detached:  make(map[digest.Digest]string),
+			Blobs:     make(map[digest.Digest][]byte),
+		},
 	}
 }
 
@@ -115,6 +153,14 @@ func LoadFromImage(store storage.Store, image string) (string, List, error) {
 	list := &list{
 		List:      manifestList,
 		instances: make(map[digest.Digest]string),
+		artifacts: artifactsDetails{
+			Manifests: make(map[digest.Digest]string),
+			Files:     make(map[digest.Digest][]string),
+			Configs:   make(map[digest.Digest]digest.Digest),
+			Layers:    make(map[digest.Digest][]digest.Digest),
+			Detached:  make(map[digest.Digest]string),
+			Blobs:     make(map[digest.Digest][]byte),
+		},
 	}
 	instancesBytes, err := store.ImageBigData(img.ID, instancesData)
 	if err != nil {
@@ -123,8 +169,18 @@
 	if err := json.Unmarshal(instancesBytes, &list.instances); err != nil {
 		return "", nil, fmt.Errorf("decoding instance list for image %q: %w", image, err)
 	}
+	artifactsBytes, err := store.ImageBigData(img.ID, artifactsData)
+	if err != nil {
+		if !errors.Is(err, os.ErrNotExist) {
+			return "", nil, fmt.Errorf("locating image %q for loading instance list: %w", image, err)
+		}
+		artifactsBytes = []byte("{}")
+	}
+	if err := json.Unmarshal(artifactsBytes, &list.artifacts); err != nil {
+		return "", nil, fmt.Errorf("decoding artifact list for image %q: %w", image, err)
+	}
 	list.instances[""] = img.ID
-	return img.ID, list, err
+	return img.ID, list, nil
 }
 
 // SaveToImage saves the manifest list or image index as the manifest of an
@@ -140,41 +196,79 @@ func (l *list) SaveToImage(store storage.Store, imageID string, names []string,
 	if err != nil {
 		return "", err
 	}
-	img, err := store.CreateImage(imageID, names, "", "", &storage.ImageOptions{})
-	if err == nil || errors.Is(err, storage.ErrDuplicateID) {
-		created := (err == nil)
-		if created {
-			imageID = img.ID
-			l.instances[""] = img.ID
-		}
-		err := store.SetImageBigData(imageID, storage.ImageDigestManifestBigDataNamePrefix, manifestBytes, manifest.Digest)
-		if err != nil {
-			if created {
-				if _, err2 := store.DeleteImage(img.ID, true); err2 != nil {
-					logrus.Errorf("Deleting image %q after failing to save manifest for it", img.ID)
+	artifactsBytes, err := json.Marshal(&l.artifacts)
+	if err != nil {
+		return "", err
+	}
+	manifestDigest, err := manifest.Digest(manifestBytes)
+	if err != nil {
+		return "", err
+	}
+	imageOptions := &storage.ImageOptions{
+		BigData: []storage.ImageBigDataOption{
+			{Key: storage.ImageDigestManifestBigDataNamePrefix, Data: manifestBytes, Digest: manifestDigest},
+			{Key: instancesData, Data: instancesBytes},
+			{Key: artifactsData, Data: artifactsBytes},
+		},
+	}
+	img, err := store.CreateImage(imageID, names, "", "", imageOptions)
+	if err != nil {
+		if imageID != "" && errors.Is(err, storage.ErrDuplicateID) {
+			for _, bd := range imageOptions.BigData {
+				digester := manifest.Digest
+				if !strings.HasPrefix(bd.Key, storage.ImageDigestManifestBigDataNamePrefix) {
+					digester = nil
+				}
+				err := store.SetImageBigData(imageID, bd.Key, bd.Data, digester)
+				if err != nil {
+					return "", fmt.Errorf("saving manifest list to image %q: %w", imageID, err)
 				}
 			}
-			return "", fmt.Errorf("saving manifest list to image %q: %w", imageID, err)
+			return imageID, nil
 		}
-		err = store.SetImageBigData(imageID, instancesData, instancesBytes, nil)
-		if err != nil {
-			if created {
-				if _, err2 := store.DeleteImage(img.ID, true); err2 != nil {
-					logrus.Errorf("Deleting image %q after failing to save instance locations for it", img.ID)
+		return "", err
+	}
+	l.instances[""] = img.ID
+	return img.ID, nil
+}
+
+// Files returns the list of files associated with a particular artifact
+// instance in the image index, primarily for display purposes.
+func (l *list) Files(instanceDigest digest.Digest) ([]string, error) {
+	filesList, ok := l.artifacts.Files[instanceDigest]
+	if ok {
+		return slices.Clone(filesList), nil
+	}
+	return nil, nil
+}
+
+// InstanceByFile returns the instanceDigest of the first manifest in the index
+// which refers to the named file. The name will be passed to filepath.Abs()
+// before searching for an instance which references it.
+func (l *list) InstanceByFile(file string) (digest.Digest, error) {
+	if parsedDigest, err := digest.Parse(file); err == nil {
+		// nice try, but that's already a digest!
+		return parsedDigest, nil
+	}
+	abs, err := filepath.Abs(file)
+	if err != nil {
+		return "", err
+	}
+	for instanceDigest, files := range l.artifacts.Files {
+		for _, file := range files {
+			if file == abs {
+				return instanceDigest, nil
 			}
-			return "", fmt.Errorf("saving instance list to image %q: %w", imageID, err)
 		}
-		return imageID, nil
 	}
-	return "", fmt.Errorf("creating image to hold manifest list: %w", err)
+	return "", os.ErrNotExist
 }
 
 // Reference returns an image reference for the composite image being built
 // in the list, or an error if the list has never been saved to a local image.
func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, instances []digest.Digest) (types.ImageReference, error) { if l.instances[""] == "" { - return nil, fmt.Errorf("building reference to list: %w", ErrListImageUnknown) + return nil, fmt.Errorf("building reference to list, appears to have not been saved first: %w", ErrListImageUnknown) } s, err := is.Transport.ParseStoreReference(store, l.instances[""]) if err != nil { @@ -191,11 +285,100 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in } case cp.CopySpecificImages: for instance := range l.instances { - for _, allowed := range instances { - if instance == allowed { - whichInstances = append(whichInstances, instance) + if slices.Contains(instances, instance) { + whichInstances = append(whichInstances, instance) + } + } + } + if len(l.artifacts.Manifests) > 0 { + img, err := is.Transport.GetImage(s) + if err != nil { + return nil, fmt.Errorf("locating image %s: %w", transports.ImageName(s), err) + } + imgDirectory, err := store.ImageDirectory(img.ID) + if err != nil { + return nil, fmt.Errorf("locating per-image directory for %s: %w", img.ID, err) + } + tmp, err := os.MkdirTemp(imgDirectory, pushingArtifactsSubdirectory) + if err != nil { + return nil, err + } + subdir := 0 + for artifactManifestDigest, contents := range l.artifacts.Manifests { + // create the blobs directory + subdir++ + tmp := filepath.Join(tmp, strconv.Itoa(subdir)) + blobsDir := filepath.Join(tmp, "blobs", artifactManifestDigest.Algorithm().String()) + if err := os.MkdirAll(blobsDir, 0o700); err != nil { + return nil, fmt.Errorf("creating directory for blobs: %w", err) + } + // write the artifact manifest + if err := os.WriteFile(filepath.Join(blobsDir, artifactManifestDigest.Encoded()), []byte(contents), 0o644); err != nil { + return nil, fmt.Errorf("writing artifact manifest as blob: %w", err) + } + // symlink all of the referenced files and write the inlined blobs into the blobs directory + var referencedBlobDigests []digest.Digest + var symlinkedFiles []string + if referencedConfigDigest, ok := l.artifacts.Configs[artifactManifestDigest]; ok { + referencedBlobDigests = append(referencedBlobDigests, referencedConfigDigest) + } + referencedBlobDigests = append(referencedBlobDigests, l.artifacts.Layers[artifactManifestDigest]...) 
+ for _, referencedBlobDigest := range referencedBlobDigests { + referencedFile, knownFile := l.artifacts.Detached[referencedBlobDigest] + referencedBlob, knownBlob := l.artifacts.Blobs[referencedBlobDigest] + if !knownFile && !knownBlob { + return nil, fmt.Errorf(`internal error: no file or blob with artifact "config" or "layer" digest %q recorded`, referencedBlobDigest) + } + expectedLayerBlobPath := filepath.Join(blobsDir, referencedBlobDigest.Encoded()) + if _, err := os.Lstat(expectedLayerBlobPath); err == nil { + // did this one already + continue + } else if knownFile { + if err := os.Symlink(referencedFile, expectedLayerBlobPath); err != nil { + return nil, err + } + symlinkedFiles = append(symlinkedFiles, referencedFile) + } else if knownBlob { + if err := os.WriteFile(expectedLayerBlobPath, referencedBlob, 0o600); err != nil { + return nil, err + } } } + // write the index that refers to this one artifact image + tag := "latest" + indexFile := filepath.Join(tmp, "index.json") + index := v1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + MediaType: v1.MediaTypeImageIndex, + Manifests: []v1.Descriptor{{ + MediaType: v1.MediaTypeImageManifest, + Digest: artifactManifestDigest, + Size: int64(len(contents)), + Annotations: map[string]string{ + v1.AnnotationRefName: tag, + }, + }}, + } + indexBytes, err := json.Marshal(&index) + if err != nil { + return nil, fmt.Errorf("encoding image index for OCI layout: %w", err) + } + if err := os.WriteFile(indexFile, indexBytes, 0o644); err != nil { + return nil, fmt.Errorf("writing image index for OCI layout: %w", err) + } + // write the layout file + layoutFile := filepath.Join(tmp, "oci-layout") + if err := os.WriteFile(layoutFile, []byte(`{"imageLayoutVersion": "1.0.0"}`), 0o644); err != nil { + return nil, fmt.Errorf("writing oci-layout file: %w", err) + } + // build the reference to this artifact image's oci layout + ref, err := ocilayout.NewReference(tmp, tag) + if err != nil { + return nil, fmt.Errorf("creating ImageReference for artifact with files %q: %w", symlinkedFiles, err) + } + references = append(references, ref) } } for _, instance := range whichInstances { @@ -331,6 +514,8 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag OS, Architecture, OSVersion, Variant string Features, OSFeatures, Annotations []string Size int64 + ConfigInfo types.BlobInfo + ArtifactType string } var instanceInfos []instanceInfo var manifestDigest digest.Digest @@ -361,6 +546,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag Features: append([]string{}, lists.Docker().Manifests[i].Platform.Features...), OSFeatures: append([]string{}, platform.OSFeatures...), Size: instance.Size, + ArtifactType: instance.ArtifactType, } instanceInfos = append(instanceInfos, instanceInfo) } @@ -391,6 +577,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag Features: append([]string{}, lists.Docker().Manifests[i].Platform.Features...), OSFeatures: append([]string{}, platform.OSFeatures...), Size: instance.Size, + ArtifactType: instance.ArtifactType, } instanceInfos = append(instanceInfos, instanceInfo) added = true @@ -406,11 +593,28 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag instanceInfo := instanceInfo{ instanceDigest: nil, } + if primaryManifestType == v1.MediaTypeImageManifest { + if m, err := manifest.OCI1FromManifest(primaryManifestBytes); err == nil { + instanceInfo.ArtifactType = m.ArtifactType + } + } 
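The `Reference()` changes above stage each artifact manifest as a minimal single-manifest OCI layout on disk: a `blobs/<algorithm>/` tree, an `index.json` naming the manifest, and the fixed `oci-layout` marker. The same layout can be produced in isolation; a self-contained sketch (the helper name and the placeholder manifest payload are illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"

	"github.com/opencontainers/go-digest"
	imgspec "github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// writeSingleManifestLayout writes a minimal OCI layout holding one manifest:
// the manifest as a blob, an index.json pointing at it, and the layout marker.
func writeSingleManifestLayout(dir string, manifestJSON []byte, tag string) error {
	dgst := digest.FromBytes(manifestJSON)
	blobsDir := filepath.Join(dir, "blobs", dgst.Algorithm().String())
	if err := os.MkdirAll(blobsDir, 0o700); err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(blobsDir, dgst.Encoded()), manifestJSON, 0o644); err != nil {
		return err
	}
	index := v1.Index{
		Versioned: imgspec.Versioned{SchemaVersion: 2},
		MediaType: v1.MediaTypeImageIndex,
		Manifests: []v1.Descriptor{{
			MediaType:   v1.MediaTypeImageManifest,
			Digest:      dgst,
			Size:        int64(len(manifestJSON)),
			Annotations: map[string]string{v1.AnnotationRefName: tag},
		}},
	}
	indexBytes, err := json.Marshal(&index)
	if err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(dir, "index.json"), indexBytes, 0o644); err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, "oci-layout"), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0o644)
}

func main() {
	// Placeholder payload; a real caller would pass actual manifest JSON.
	fmt.Println(writeSingleManifestLayout("/tmp/demo-layout", []byte(`{"schemaVersion":2}`), "latest"))
}
```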
 		instanceInfos = append(instanceInfos, instanceInfo)
 	}
+	knownConfigTypes := []string{manifest.DockerV2Schema2ConfigMediaType, v1.MediaTypeImageConfig}
 	for _, instanceInfo := range instanceInfos {
-		if instanceInfo.OS == "" || instanceInfo.Architecture == "" {
+		manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest)
+		if err != nil {
+			return "", fmt.Errorf("reading manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err)
+		}
+		instanceManifest, err := manifest.FromBlob(manifestBytes, manifestType)
+		if err != nil {
+			return "", fmt.Errorf("parsing manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err)
+		}
+		instanceInfo.ConfigInfo = instanceManifest.ConfigInfo()
+		hasPlatformConfig := instanceInfo.ArtifactType == "" && slices.Contains(knownConfigTypes, instanceInfo.ConfigInfo.MediaType)
+		needToParsePlatformConfig := (instanceInfo.OS == "" || instanceInfo.Architecture == "")
+		if hasPlatformConfig && needToParsePlatformConfig {
 			img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, instanceInfo.instanceDigest))
 			if err != nil {
 				return "", fmt.Errorf("reading configuration blob from %q: %w", transports.ImageName(ref), err)
 			}
@@ -422,17 +626,15 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
 			if instanceInfo.OS == "" {
 				instanceInfo.OS = config.OS
 				instanceInfo.OSVersion = config.OSVersion
-				instanceInfo.OSFeatures = config.OSFeatures
+				if config.OSFeatures != nil {
+					instanceInfo.OSFeatures = slices.Clone(config.OSFeatures)
+				}
 			}
 			if instanceInfo.Architecture == "" {
 				instanceInfo.Architecture = config.Architecture
 				instanceInfo.Variant = config.Variant
 			}
 		}
-		manifestBytes, manifestType, err := src.GetManifest(ctx, instanceInfo.instanceDigest)
-		if err != nil {
-			return "", fmt.Errorf("reading manifest from %q, instance %q: %w", transports.ImageName(ref), instanceInfo.instanceDigest, err)
-		}
 		if instanceInfo.instanceDigest == nil {
 			manifestDigest, err = manifest.Digest(manifestBytes)
 			if err != nil {
@@ -455,6 +657,281 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag
 	return manifestDigest, nil
 }
 
+// AddArtifactOptions contains options which control the contents of the
+// artifact manifest that AddArtifact will create and add to the image index.
+
+// This should provide for all of the ways to construct a manifest outlined in
+// https://github.com/opencontainers/image-spec/blob/main/manifest.md#guidelines-for-artifact-usage
+// * no blobs → set ManifestArtifactType
+// * blobs, no configuration → set ManifestArtifactType and possibly LayerMediaType, and provide file names
+// * blobs and configuration → set ManifestArtifactType, possibly LayerMediaType, and ConfigDescriptor, and provide file names
+//
+// The older style of describing artifacts:
+// * leave ManifestArtifactType blank
+// * specify a zero-length application/vnd.oci.image.config.v1+json config blob
+// * set LayerMediaType to a custom type
+//
+// When reading data produced elsewhere, note that newer tooling will produce
+// manifests with ArtifactType set. If the manifest's ArtifactType is not set,
+// consumers should consult the config descriptor's MediaType.
+type AddArtifactOptions struct {
+	ManifestArtifactType *string              // overall type of the artifact manifest. default: "application/vnd.unknown.artifact.v1"
+	Platform             v1.Platform          // default: add to the index without platform information
+	ConfigDescriptor     *v1.Descriptor       // default: a descriptor for an explicitly empty config blob
+	ConfigFile           string               // path to config contents, recorded if ConfigDescriptor.Size != 0 and ConfigDescriptor.Data is not set
+	LayerMediaType       *string              // default: mime.TypeByExtension() if basename contains ".", else http.DetectContentType()
+	Annotations          map[string]string    // optional, default is none
+	SubjectReference     types.ImageReference // optional
+	ExcludeTitles        bool                 // don't add "org.opencontainers.image.title" annotations set to file base names
+}
+
+// AddArtifact creates an artifact manifest describing the specified file or
+// files, then adds them to the specified image index. Returns the
+// instanceDigest for the artifact manifest.
+// The caller could craft the manifest themselves and use Add() to add it to
+// the image index and get the same end-result, but this should save them some
+// work.
+func (l *list) AddArtifact(ctx context.Context, sys *types.SystemContext, options AddArtifactOptions, files ...string) (digest.Digest, error) {
+	// If we were given a subject, build a descriptor for it first, since
+	// it might be remote, and anything else we do before looking at it
+	// might have to get thrown away if we can't get to it for whatever
+	// reason.
+	var subject *v1.Descriptor
+	if options.SubjectReference != nil {
+		subjectReference, err := options.SubjectReference.NewImageSource(ctx, sys)
+		if err != nil {
+			return "", fmt.Errorf("setting up to read manifest and configuration from subject %q: %w", transports.ImageName(options.SubjectReference), err)
+		}
+		defer subjectReference.Close()
+		subjectManifestBytes, subjectManifestType, err := subjectReference.GetManifest(ctx, nil)
+		if err != nil {
+			return "", fmt.Errorf("reading manifest from subject %q: %w", transports.ImageName(options.SubjectReference), err)
+		}
+		subjectManifestDigest, err := manifest.Digest(subjectManifestBytes)
+		if err != nil {
+			return "", fmt.Errorf("digesting manifest of subject %q: %w", transports.ImageName(options.SubjectReference), err)
+		}
+		var subjectArtifactType string
+		if !manifest.MIMETypeIsMultiImage(subjectManifestType) {
+			var subjectManifest v1.Manifest
+			if json.Unmarshal(subjectManifestBytes, &subjectManifest) == nil {
+				subjectArtifactType = subjectManifest.ArtifactType
+			}
+		}
+		subject = &v1.Descriptor{
+			MediaType:    subjectManifestType,
+			ArtifactType: subjectArtifactType,
+			Digest:       subjectManifestDigest,
+			Size:         int64(len(subjectManifestBytes)),
+		}
+	}
+
+	// Build up the layers list piece by piece.
+	var layers []v1.Descriptor
+	fileDigests := make(map[string]digest.Digest)
+
+	if len(files) == 0 {
+		// https://github.com/opencontainers/image-spec/blob/main/manifest.md#guidelines-for-artifact-usage
+		// says that we should have at least one layer listed, even if it's just a placeholder
+		layers = append(layers, v1.DescriptorEmptyJSON)
+	}
+	for _, file := range files {
+		if err := func() error {
+			// Open the file so that we can digest it.
+			absFile, err := filepath.Abs(file)
+			if err != nil {
+				return fmt.Errorf("converting %q to an absolute path: %w", file, err)
+			}
+
+			f, err := os.Open(absFile)
+			if err != nil {
+				return fmt.Errorf("reading %q to determine its digest: %w", file, err)
+			}
+			defer f.Close()
+
+			// Hang on to a copy of the first 512 bytes, but digest the whole thing.
+ digester := digest.Canonical.Digester() + writeCounter := ioutils.NewWriteCounter(digester.Hash()) + var detectableData bytes.Buffer + _, err = io.CopyN(writeCounter, io.TeeReader(f, &detectableData), 512) + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("reading %q to determine its digest: %w", file, err) + } + if err == nil { + if _, err := io.Copy(writeCounter, f); err != nil { + return fmt.Errorf("reading %q to determine its digest: %w", file, err) + } + } + fileDigests[absFile] = digester.Digest() + + // If one wasn't specified, figure out what the MediaType should be. + title := filepath.Base(absFile) + layerMediaType := options.LayerMediaType + if layerMediaType == nil { + if index := strings.LastIndex(title, "."); index != -1 { + // File's basename has an extension, try to use a shortcut. + tmp := mime.TypeByExtension(title[index:]) + if tmp != "" { + layerMediaType = &tmp + } + } + if layerMediaType == nil { + // File's basename has no extension or didn't map to a type, look at the contents we saved. + tmp := http.DetectContentType(detectableData.Bytes()) + layerMediaType = &tmp + } + if layerMediaType != nil { + // Strip off any parameters, since we only want the type name. + if parsedMediaType, _, err := mime.ParseMediaType(*layerMediaType); err == nil { + layerMediaType = &parsedMediaType + } + } + } + + // Build the descriptor for the layer. + descriptor := v1.Descriptor{ + MediaType: *layerMediaType, + Digest: fileDigests[absFile], + Size: writeCounter.Count, + } + // OCI annotations are usually applied at the image manifest as a whole, + // but tools like oras (https://oras.land/) also apply them to blob + // descriptors. AnnotationTitle is used as a suggestion for the name + // to give to a blob if it's being stored as a file, and we default + // to adding one based on its original name. + if !options.ExcludeTitles { + descriptor.Annotations = map[string]string{ + v1.AnnotationTitle: title, + } + } + layers = append(layers, descriptor) + return nil + }(); err != nil { + return "", err + } + } + + // Unless we were told what this is, use the default that ORAS uses. + artifactType := "application/vnd.unknown.artifact.v1" + if options.ManifestArtifactType != nil { + artifactType = *options.ManifestArtifactType + } + + // Unless we were explicitly told otherwise, default to an empty config blob. 
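+ // (v1.DescriptorEmptyJSON describes the OCI v1.1 "empty" blob: the
+ // two-byte JSON document `{}` with media type
+ // application/vnd.oci.empty.v1+json.)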
+ configDescriptor := internal.DeepCopyDescriptor(&v1.DescriptorEmptyJSON) + if options.ConfigDescriptor != nil { + configDescriptor = internal.DeepCopyDescriptor(options.ConfigDescriptor) + } + if options.ConfigFile != "" { + if options.ConfigDescriptor == nil { // i.e., we assigned the default mediatype + configDescriptor.MediaType = v1.MediaTypeImageConfig + } + configDescriptor.Data = nil + configDescriptor.Digest = "" // to be figured out below + configDescriptor.Size = -1 // to be figured out below + } + configFilePath := "" + if configDescriptor.Size != 0 { + if len(configDescriptor.Data) == 0 { + if options.ConfigFile == "" { + return "", errors.New("needed config data file, but none was provided") + } + filePath, err := filepath.Abs(options.ConfigFile) + if err != nil { + return "", fmt.Errorf("recording artifact config data file %q: %w", options.ConfigFile, err) + } + digester := digest.Canonical.Digester() + counter := ioutils.NewWriteCounter(digester.Hash()) + if err := func() error { + f, err := os.Open(filePath) + if err != nil { + return fmt.Errorf("reading artifact config data file %q: %w", options.ConfigFile, err) + } + defer f.Close() + if _, err := io.Copy(counter, f); err != nil { + return fmt.Errorf("digesting artifact config data file %q: %w", options.ConfigFile, err) + } + return nil + }(); err != nil { + return "", err + } + configDescriptor.Data = nil + configDescriptor.Size = counter.Count + configDescriptor.Digest = digester.Digest() + configFilePath = filePath + } else { + decoder := bytes.NewReader(configDescriptor.Data) + digester := digest.Canonical.Digester() + counter := ioutils.NewWriteCounter(digester.Hash()) + if _, err := io.Copy(counter, decoder); err != nil { + return "", fmt.Errorf("digesting inlined artifact config data: %w", err) + } + configDescriptor.Size = counter.Count + configDescriptor.Digest = digester.Digest() + } + } else { + configDescriptor.Data = nil + configDescriptor.Digest = digest.Canonical.FromString("") + } + + // Construct the manifest. + artifactManifest := v1.Manifest{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + MediaType: v1.MediaTypeImageManifest, + ArtifactType: artifactType, + Config: *configDescriptor, + Layers: layers, + Subject: subject, + } + // Add in annotations, more or less exactly as specified. + if options.Annotations != nil { + artifactManifest.Annotations = maps.Clone(options.Annotations) + } + + // Encode and save the data we care about. 
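+ // (Inlined blob contents are kept in memory in l.artifacts.Blobs, while
+ // file-backed blobs are only recorded by path in l.artifacts.Detached;
+ // the OCI-layout writer later symlinks or rewrites them as needed.)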
+ artifactManifestBytes, err := json.Marshal(artifactManifest) + if err != nil { + return "", fmt.Errorf("marshalling the artifact manifest: %w", err) + } + artifactManifestDigest, err := manifest.Digest(artifactManifestBytes) + if err != nil { + return "", fmt.Errorf("digesting the artifact manifest: %w", err) + } + l.artifacts.Manifests[artifactManifestDigest] = string(artifactManifestBytes) + l.artifacts.Layers[artifactManifestDigest] = nil + l.artifacts.Configs[artifactManifestDigest] = artifactManifest.Config.Digest + if configFilePath != "" { + l.artifacts.Detached[artifactManifest.Config.Digest] = configFilePath + l.artifacts.Files[artifactManifestDigest] = append(l.artifacts.Files[artifactManifestDigest], configFilePath) + } else { + l.artifacts.Blobs[artifactManifest.Config.Digest] = slices.Clone(artifactManifest.Config.Data) + } + for filePath, fileDigest := range fileDigests { + l.artifacts.Layers[artifactManifestDigest] = append(l.artifacts.Layers[artifactManifestDigest], fileDigest) + l.artifacts.Detached[fileDigest] = filePath + l.artifacts.Files[artifactManifestDigest] = append(l.artifacts.Files[artifactManifestDigest], filePath) + } + for _, layer := range layers { + if len(layer.Data) != 0 { + l.artifacts.Blobs[layer.Digest] = slices.Clone(layer.Data) + l.artifacts.Layers[artifactManifestDigest] = append(l.artifacts.Layers[artifactManifestDigest], layer.Digest) + } + } + // Add this artifact manifest to the image index. + if err := l.AddInstance(artifactManifestDigest, int64(len(artifactManifestBytes)), artifactManifest.MediaType, options.Platform.OS, options.Platform.Architecture, options.Platform.OSVersion, options.Platform.OSFeatures, options.Platform.Variant, nil, nil); err != nil { + return "", fmt.Errorf("adding artifact manifest for %q to image index: %w", files, err) + } + // Set the artifact type in the image index entry if we have one, since AddInstance() didn't do that for us. + if artifactManifest.ArtifactType != "" { + if err := l.List.SetArtifactType(&artifactManifestDigest, artifactManifest.ArtifactType); err != nil { + return "", fmt.Errorf("adding artifact manifest for %q to image index: %w", files, err) + } + } + return artifactManifestDigest, nil +} + // Remove filters out any instances in the list which match the specified digest. 
func (l *list) Remove(instanceDigest digest.Digest) error { err := l.List.Remove(instanceDigest) diff --git a/vendor/github.com/containers/common/libimage/normalize.go b/vendor/github.com/containers/common/libimage/normalize.go index 2b3402861..b00af66a0 100644 --- a/vendor/github.com/containers/common/libimage/normalize.go +++ b/vendor/github.com/containers/common/libimage/normalize.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libimage/oci.go b/vendor/github.com/containers/common/libimage/oci.go index fcbd10ada..80aefc974 100644 --- a/vendor/github.com/containers/common/libimage/oci.go +++ b/vendor/github.com/containers/common/libimage/oci.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libimage/platform.go b/vendor/github.com/containers/common/libimage/platform.go index c378bc27f..bf8b054b1 100644 --- a/vendor/github.com/containers/common/libimage/platform.go +++ b/vendor/github.com/containers/common/libimage/platform.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index bc8e84981..6c8d87c57 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -31,7 +30,7 @@ import ( "github.com/sirupsen/logrus" ) -// PullOptions allows for custommizing image pulls. +// PullOptions allows for customizing image pulls. type PullOptions struct { CopyOptions @@ -54,13 +53,37 @@ type PullOptions struct { // The error is storage.ErrImageUnknown iff the pull policy is set to "never" // and no local image has been found. This allows for an easier integration // into some users of this package (e.g., Buildah). -func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullPolicy, options *PullOptions) ([]*Image, error) { +func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullPolicy, options *PullOptions) (_ []*Image, pullError error) { logrus.Debugf("Pulling image %s (policy: %s)", name, pullPolicy) - + if r.eventChannel != nil { + defer func() { + if pullError != nil { + // Note that we use the input name here to preserve the transport data. + r.writeEvent(&Event{Name: name, Time: time.Now(), Type: EventTypeImagePullError, Error: pullError}) + } + }() + } if options == nil { options = &PullOptions{} } + defaultConfig, err := config.Default() + if err != nil { + return nil, err + } + if options.MaxRetries == nil { + options.MaxRetries = &defaultConfig.Engine.Retry + } + if options.RetryDelay == nil { + if defaultConfig.Engine.RetryDelay != "" { + duration, err := time.ParseDuration(defaultConfig.Engine.RetryDelay) + if err != nil { + return nil, fmt.Errorf("failed to parse containers.conf retry_delay: %w", err) + } + options.RetryDelay = &duration + } + } + var possiblyUnqualifiedName string // used for short-name resolution ref, err := alltransports.ParseImageName(name) if err != nil { @@ -134,28 +157,25 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP options.Variant = r.systemContext.VariantChoice } - var ( - pulledImages []string - pullError error - ) + var pulledImages []string // Dispatch the copy operation. 
switch ref.Transport().Name() { // DOCKER REGISTRY case registryTransport.Transport.Name(): - pulledImages, pullError = r.copyFromRegistry(ctx, ref, possiblyUnqualifiedName, pullPolicy, options) + pulledImages, err = r.copyFromRegistry(ctx, ref, possiblyUnqualifiedName, pullPolicy, options) // DOCKER ARCHIVE case dockerArchiveTransport.Transport.Name(): - pulledImages, pullError = r.copyFromDockerArchive(ctx, ref, &options.CopyOptions) + pulledImages, err = r.copyFromDockerArchive(ctx, ref, &options.CopyOptions) // ALL OTHER TRANSPORTS default: - pulledImages, pullError = r.copyFromDefault(ctx, ref, &options.CopyOptions) + pulledImages, err = r.copyFromDefault(ctx, ref, &options.CopyOptions) } - if pullError != nil { - return nil, pullError + if err != nil { + return nil, err } localImages := []*Image{} @@ -406,7 +426,7 @@ func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference for _, tag := range tags { select { // Let's be gentle with Podman remote. case <-ctx.Done(): - return nil, fmt.Errorf("pulling cancelled") + return nil, errors.New("pulling cancelled") default: // We can continue. } @@ -446,7 +466,7 @@ func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemCo } else { d, err := manifest.Digest(manifestBytes) if err != nil { - return nil, fmt.Errorf("digesting manifest") + return nil, errors.New("digesting manifest") } imageDigest = d } @@ -511,7 +531,7 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str // If the local image is corrupted, we need to repull it. if localImage != nil { - if err := localImage.isCorrupted(imageName); err != nil { + if err := localImage.isCorrupted(ctx, imageName); err != nil { logrus.Error(err) localImage = nil } diff --git a/vendor/github.com/containers/common/libimage/push.go b/vendor/github.com/containers/common/libimage/push.go index ed1d90c14..f89b8fc07 100644 --- a/vendor/github.com/containers/common/libimage/push.go +++ b/vendor/github.com/containers/common/libimage/push.go @@ -1,19 +1,22 @@ //go:build !remote -// +build !remote package libimage import ( "context" + "fmt" "time" + "github.com/containers/common/pkg/config" dockerArchiveTransport "github.com/containers/image/v5/docker/archive" + dockerDaemonTransport "github.com/containers/image/v5/docker/daemon" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/transports/alltransports" "github.com/sirupsen/logrus" ) -// PushOptions allows for custommizing image pushes. +// PushOptions allows for customizing image pushes. type PushOptions struct { CopyOptions } @@ -32,6 +35,23 @@ func (r *Runtime) Push(ctx context.Context, source, destination string, options options = &PushOptions{} } + defaultConfig, err := config.Default() + if err != nil { + return nil, err + } + if options.MaxRetries == nil { + options.MaxRetries = &defaultConfig.Engine.Retry + } + if options.RetryDelay == nil { + if defaultConfig.Engine.RetryDelay != "" { + duration, err := time.ParseDuration(defaultConfig.Engine.RetryDelay) + if err != nil { + return nil, fmt.Errorf("failed to parse containers.conf retry_delay: %w", err) + } + options.RetryDelay = &duration + } + } + // Look up the local image. Note that we need to ignore the platform // and push what the user specified (containers/podman/issues/10344). 
 image, resolvedSource, err := r.LookupImage(source, nil)
@@ -66,6 +86,14 @@ func (r *Runtime) Push(ctx context.Context, source, destination string, options
 destRef = dockerRef
 }
+ // The docker-archive and docker-daemon transports, and the DockerV2Schema2
+ // manifest type, only support Gzip compression, so drop any other requested
+ // compression format.
+ if options.CompressionFormat != nil &&
+ (destRef.Transport().Name() == dockerArchiveTransport.Transport.Name() ||
+ destRef.Transport().Name() == dockerDaemonTransport.Transport.Name() ||
+ options.ManifestMIMEType == manifest.DockerV2Schema2MediaType) {
+ options.CompressionFormat = nil
+ }
+
 if r.eventChannel != nil {
 defer r.writeEvent(&Event{ID: image.ID(), Name: destination, Time: time.Now(), Type: EventTypeImagePush})
 }
diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go
index 1948fe0ad..632f0fccf 100644
--- a/vendor/github.com/containers/common/libimage/runtime.go
+++ b/vendor/github.com/containers/common/libimage/runtime.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 package libimage
@@ -162,7 +161,7 @@ func (r *Runtime) storageToImage(storageImage *storage.Image, ref types.ImageRef
 }
 }
-// Exists returns true if the specicifed image exists in the local containers
+// Exists returns true if the specified image exists in the local containers
 // storage. Note that it may return false if an image corrupted.
 func (r *Runtime) Exists(name string) (bool, error) {
 image, _, err := r.LookupImage(name, nil)
@@ -172,7 +171,7 @@ func (r *Runtime) Exists(name string) (bool, error) {
 if image == nil {
 return false, nil
 }
- if err := image.isCorrupted(name); err != nil {
+ if err := image.isCorrupted(context.Background(), name); err != nil {
 logrus.Error(err)
 return false, nil
 }
@@ -235,8 +234,12 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
 if storageRef.Transport().Name() != storageTransport.Transport.Name() {
 return nil, "", fmt.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name())
 }
- img, err := storageTransport.Transport.GetStoreImage(r.store, storageRef)
+ _, img, err := storageTransport.ResolveReference(storageRef)
 if err != nil {
+ if errors.Is(err, storageTransport.ErrNoSuchImage) {
+ // backward compatibility
+ return nil, "", storage.ErrImageUnknown
+ }
 return nil, "", err
 }
 logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport())
@@ -347,9 +350,9 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, namedCandida
 if err != nil {
 return nil, err
 }
- img, err = storageTransport.Transport.GetStoreImage(r.store, ref)
+ _, img, err = storageTransport.ResolveReference(ref)
 if err != nil {
- if errors.Is(err, storage.ErrImageUnknown) {
+ if errors.Is(err, storageTransport.ErrNoSuchImage) {
 return nil, nil
 }
 return nil, err
@@ -605,7 +608,7 @@ func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListI
 // as the layer tree will computed once for all instead of once for
 // each individual image (see containers/podman/issues/17828).
- tree, err := r.layerTree(images) + tree, err := r.layerTree(ctx, images) if err != nil { return nil, err } @@ -687,7 +690,7 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem } if options.ExternalContainers && options.IsExternalContainerFunc == nil { - return nil, []error{fmt.Errorf("libimage error: cannot remove external containers without callback")} + return nil, []error{errors.New("libimage error: cannot remove external containers without callback")} } // The logic here may require some explanation. Image removal is diff --git a/vendor/github.com/containers/common/libimage/save.go b/vendor/github.com/containers/common/libimage/save.go index 47a3a566b..62cad3288 100644 --- a/vendor/github.com/containers/common/libimage/save.go +++ b/vendor/github.com/containers/common/libimage/save.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage @@ -54,7 +53,7 @@ func (r *Runtime) Save(ctx context.Context, names []string, format, path string, return fmt.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format) } if len(options.AdditionalTags) > 0 { - return fmt.Errorf("cannot save multiple images with multiple tags") + return errors.New("cannot save multiple images with multiple tags") } } diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go index 9ef0e8320..b26ad80d2 100644 --- a/vendor/github.com/containers/common/libimage/search.go +++ b/vendor/github.com/containers/common/libimage/search.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libimage diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go index 6e4514b99..ffadae0d2 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go @@ -1,5 +1,4 @@ -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -18,8 +17,8 @@ import ( internalutil "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" "github.com/containers/common/libnetwork/util" - pkgutil "github.com/containers/common/pkg/util" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) @@ -32,13 +31,13 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str IPAMOptions: map[string]string{}, } - cniJSON := make(map[string]interface{}) + cniJSON := make(map[string]any) err := json.Unmarshal(conf.Bytes, &cniJSON) if err != nil { return nil, fmt.Errorf("failed to unmarshal network config %s: %w", conf.Name, err) } if args, ok := cniJSON["args"]; ok { - if key, ok := args.(map[string]interface{}); ok { + if key, ok := args.(map[string]any); ok { // read network labels and options from the conf file network.Labels = getNetworkArgsFromConfList(key, podmanLabelKey) network.Options = getNetworkArgsFromConfList(key, podmanOptionsKey) @@ -215,9 +214,9 @@ func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath } // getNetworkArgsFromConfList returns the map of args in a conflist, argType should be labels or options -func getNetworkArgsFromConfList(args map[string]interface{}, argType string) map[string]string { +func getNetworkArgsFromConfList(args map[string]any, argType string) map[string]string { if args, ok := args[argType]; ok { - if labels, ok := 
args.(map[string]interface{}); ok { + if labels, ok := args.(map[string]any); ok { result := make(map[string]string, len(labels)) for k, v := range labels { if v, ok := v.(string); ok { @@ -299,7 +298,7 @@ func (n *cniNetwork) createCNIConfigListFromNetwork(network *types.Network, writ // the dnsname plugin also needs to be updated for 1.0.0 // TODO change to 1.0.0 when most distros support it ncList := newNcList(network.Name, "0.4.0", network.Labels, network.Options) - var plugins []interface{} + var plugins []any switch network.Driver { case types.BridgeNetworkDriver: @@ -359,7 +358,7 @@ func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry protocols := strings.Split(port.Protocol, ",") for _, protocol := range protocols { - if !pkgutil.StringInSlice(protocol, []string{"tcp", "udp", "sctp"}) { + if !slices.Contains([]string{"tcp", "udp", "sctp"}, protocol) { return nil, fmt.Errorf("unknown port protocol %s", protocol) } cniPort := cniPortMapEntry{ @@ -421,11 +420,11 @@ func parseOptions(networkOptions map[string]string, networkDriver string) (*opti case types.ModeOption: switch networkDriver { case types.MacVLANNetworkDriver: - if !pkgutil.StringInSlice(v, types.ValidMacVLANModes) { + if !slices.Contains(types.ValidMacVLANModes, v) { return nil, fmt.Errorf("unknown macvlan mode %q", v) } case types.IPVLANNetworkDriver: - if !pkgutil.StringInSlice(v, types.ValidIPVLANModes) { + if !slices.Contains(types.ValidIPVLANModes, v) { return nil, fmt.Errorf("unknown ipvlan mode %q", v) } default: diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go index 79d7ef120..9ccf4eff4 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go @@ -16,8 +16,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -26,8 +25,10 @@ import ( "context" "encoding/json" "fmt" + "os" "os/exec" "path/filepath" + "strings" "github.com/containernetworking/cni/pkg/invoke" "github.com/containernetworking/cni/pkg/version" @@ -80,6 +81,16 @@ func (e *cniExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData [ c.Env = append(c.Env, "XDG_RUNTIME_DIR=") } + // The CNI plugins need access to iptables in $PATH. As it turns out debian doesn't put + // /usr/sbin in $PATH for rootless users. This will break rootless networking completely. + // We might break existing users and we cannot expect everyone to change their $PATH so + // let's add /usr/sbin to $PATH ourselves. 
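+ // (Note that the check below is a plain substring match, so any $PATH
+ // entry that merely contains "/usr/sbin" also satisfies it, which is
+ // acceptable for this workaround.)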
+ path := os.Getenv("PATH") + if !strings.Contains(path, "/usr/sbin") { + path += ":/usr/sbin" + c.Env = append(c.Env, "PATH="+path) + } + err := c.Run() if err != nil { return nil, annotatePluginError(err, pluginPath, stdout.Bytes(), stderr.Bytes()) diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go index 1d48d0805..711535ced 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go @@ -1,5 +1,4 @@ -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -116,7 +115,7 @@ type dnsNameConfig struct { } // ncList describes a generic map -type ncList map[string]interface{} +type ncList map[string]any // newNcList creates a generic map of values with string // keys and adds in version and network name @@ -139,8 +138,6 @@ func newNcList(name, version string, labels, options map[string]string) ncList { // newHostLocalBridge creates a new LocalBridge for host-local func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu, vlan int, ipamConf *ipamConfig) *hostLocalBridge { - caps := make(map[string]bool) - caps["ips"] = true bridge := hostLocalBridge{ PluginType: "bridge", BrName: name, @@ -154,7 +151,7 @@ func newHostLocalBridge(name string, isGateWay, ipMasq bool, mtu, vlan int, ipam bridge.IPAM = *ipamConf // if we use host-local set the ips cap to ensure we can set static ips via runtime config if ipamConf.PluginType == types.HostLocalIPAMDriver { - bridge.Capabilities = caps + bridge.Capabilities = map[string]bool{"ips": true} } } return &bridge @@ -216,13 +213,10 @@ func newIPAMDefaultRoute(isIPv6 bool) (ipamRoute, error) { // newPortMapPlugin creates a predefined, default portmapping // configuration func newPortMapPlugin() portMapConfig { - caps := make(map[string]bool) - caps["portMappings"] = true - p := portMapConfig{ + return portMapConfig{ PluginType: "portmap", - Capabilities: caps, + Capabilities: map[string]bool{"portMappings": true}, } - return p } // newFirewallPlugin creates a generic firewall plugin @@ -246,12 +240,10 @@ func newTuningPlugin() tuningConfig { // newDNSNamePlugin creates the dnsname config with a given // domainname func newDNSNamePlugin(domainName string) dnsNameConfig { - caps := make(map[string]bool, 1) - caps["aliases"] = true return dnsNameConfig{ PluginType: "dnsname", DomainName: domainName, - Capabilities: caps, + Capabilities: map[string]bool{"aliases": true}, } } diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go index a1eeceb72..71b7872f1 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/config.go +++ b/vendor/github.com/containers/common/libnetwork/cni/config.go @@ -1,5 +1,4 @@ -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -11,8 +10,8 @@ import ( internalutil "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" - pkgutil "github.com/containers/common/pkg/util" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) func (n *cniNetwork) NetworkUpdate(_ string, _ types.NetworkUpdateOptions) error { @@ -206,7 +205,7 @@ func createIPMACVLAN(network *types.Network) error { if err != nil { return err } - if !pkgutil.StringInSlice(network.NetworkInterface, interfaceNames) { + if !slices.Contains(interfaceNames, 
network.NetworkInterface) { return fmt.Errorf("parent interface %s does not exist", network.NetworkInterface) } } diff --git a/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go b/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go index ff95c0e17..ddee6d2e0 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go +++ b/vendor/github.com/containers/common/libnetwork/cni/config_freebsd.go @@ -1,5 +1,4 @@ -//go:build freebsd -// +build freebsd +//go:build (linux || freebsd) && cni package cni diff --git a/vendor/github.com/containers/common/libnetwork/cni/config_linux.go b/vendor/github.com/containers/common/libnetwork/cni/config_linux.go index 836fd73bf..efc920614 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/config_linux.go +++ b/vendor/github.com/containers/common/libnetwork/cni/config_linux.go @@ -1,5 +1,4 @@ -//go:build linux -// +build linux +//go:build (linux || freebsd) && cni package cni diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go index 49d20b915..06b78f675 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/network.go +++ b/vendor/github.com/containers/common/libnetwork/cni/network.go @@ -1,5 +1,4 @@ -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -16,6 +15,7 @@ import ( "time" "github.com/containernetworking/cni/libcni" + "github.com/containers/common/libnetwork/internal/rootlessnetns" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/version" @@ -53,6 +53,9 @@ type cniNetwork struct { // networks is a map with loaded networks, the key is the network name networks map[string]*network + + // rootlessNetns is used for the rootless network setup/teardown + rootlessNetns *rootlessnetns.Netns } type network struct { @@ -65,21 +68,14 @@ type network struct { type InitConfig struct { // CNIConfigDir is directory where the cni config files are stored. CNIConfigDir string - // CNIPluginDirs is a list of directories where cni should look for the plugins. - CNIPluginDirs []string // RunDir is a directory where temporary files can be stored. RunDir string - // DefaultNetwork is the name for the default network. - DefaultNetwork string - // DefaultSubnet is the default subnet for the default network. - DefaultSubnet string - - // DefaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create - DefaultsubnetPools []config.SubnetPool - // IsMachine describes whenever podman runs in a podman machine environment. IsMachine bool + + // Config containers.conf options + Config *config.Config } // NewCNINetworkInterface creates the ContainerNetwork interface for the CNI backend. 
@@ -96,12 +92,12 @@ func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { return nil, err } - defaultNetworkName := conf.DefaultNetwork + defaultNetworkName := conf.Config.Network.DefaultNetwork if defaultNetworkName == "" { defaultNetworkName = types.DefaultNetworkName } - defaultSubnet := conf.DefaultSubnet + defaultSubnet := conf.Config.Network.DefaultSubnet if defaultSubnet == "" { defaultSubnet = types.DefaultSubnet } @@ -110,21 +106,30 @@ func NewCNINetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { return nil, fmt.Errorf("failed to parse default subnet: %w", err) } - defaultSubnetPools := conf.DefaultsubnetPools + defaultSubnetPools := conf.Config.Network.DefaultSubnetPools if defaultSubnetPools == nil { defaultSubnetPools = config.DefaultSubnetPools } - cni := libcni.NewCNIConfig(conf.CNIPluginDirs, &cniExec{}) + var netns *rootlessnetns.Netns + if unshare.IsRootless() { + netns, err = rootlessnetns.New(conf.RunDir, rootlessnetns.CNI, conf.Config) + if err != nil { + return nil, err + } + } + + cni := libcni.NewCNIConfig(conf.Config.Network.CNIPluginDirs.Values, &cniExec{}) n := &cniNetwork{ cniConfigDir: conf.CNIConfigDir, - cniPluginDirs: conf.CNIPluginDirs, + cniPluginDirs: conf.Config.Network.CNIPluginDirs.Get(), cniConf: cni, defaultNetwork: defaultNetworkName, defaultSubnet: defaultNet, defaultsubnetPools: defaultSubnetPools, isMachine: conf.IsMachine, lock: lock, + rootlessNetns: netns, } return n, nil diff --git a/vendor/github.com/containers/common/libnetwork/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go index 2da8da1ad..d8fb47759 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/run.go +++ b/vendor/github.com/containers/common/libnetwork/cni/run.go @@ -1,5 +1,4 @@ -//go:build linux || freebsd -// +build linux freebsd +//go:build (linux || freebsd) && cni package cni @@ -39,61 +38,72 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma return nil, fmt.Errorf("failed to set the loopback adapter up: %w", err) } - var retErr error - teardownOpts := options - teardownOpts.Networks = map[string]types.PerNetworkOptions{} - // make sure to teardown the already connected networks on error - defer func() { - if retErr != nil { - if len(teardownOpts.Networks) > 0 { - err := n.teardown(namespacePath, types.TeardownOptions(teardownOpts)) - if err != nil { - logrus.Warn(err) + results := make(map[string]types.StatusBlock, len(options.Networks)) + + setup := func() error { + var retErr error + teardownOpts := options + teardownOpts.Networks = map[string]types.PerNetworkOptions{} + // make sure to teardown the already connected networks on error + defer func() { + if retErr != nil { + if len(teardownOpts.Networks) > 0 { + err := n.teardown(namespacePath, types.TeardownOptions(teardownOpts)) + if err != nil { + logrus.Warn(err) + } } } + }() + + ports, err := convertSpecgenPortsToCNIPorts(options.PortMappings) + if err != nil { + return err } - }() - ports, err := convertSpecgenPortsToCNIPorts(options.PortMappings) - if err != nil { - return nil, err - } + for name, netOpts := range options.Networks { + netOpts := netOpts + network := n.networks[name] + rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) - results := make(map[string]types.StatusBlock, len(options.Networks)) - for name, netOpts := range options.Networks { - netOpts := netOpts - network := n.networks[name] - rt := getRuntimeConfig(namespacePath, 
options.ContainerName, options.ContainerID, name, ports, &netOpts)
-
- // If we have more than one static ip we need parse the ips via runtime config,
- // make sure to add the ips capability to the first plugin otherwise it doesn't get the ips
- if len(netOpts.StaticIPs) > 0 && !network.cniNet.Plugins[0].Network.Capabilities["ips"] {
- caps := make(map[string]interface{})
- caps["capabilities"] = map[string]bool{"ips": true}
- network.cniNet.Plugins[0], retErr = libcni.InjectConf(network.cniNet.Plugins[0], caps)
+ // If we have more than one static ip we need to parse the ips via runtime config,
+ // make sure to add the ips capability to the first plugin otherwise it doesn't get the ips
+ if len(netOpts.StaticIPs) > 0 && !network.cniNet.Plugins[0].Network.Capabilities["ips"] {
+ caps := map[string]any{
+ "capabilities": map[string]bool{"ips": true},
+ }
+ network.cniNet.Plugins[0], retErr = libcni.InjectConf(network.cniNet.Plugins[0], caps)
+ if retErr != nil {
+ return retErr
+ }
+ }
+
+ var res cnitypes.Result
+ res, retErr = n.cniConf.AddNetworkList(context.Background(), network.cniNet, rt)
+ // Add this network to teardown opts since it is now connected.
+ // Also add this if an error was returned since we want to call teardown on this regardless.
+ teardownOpts.Networks[name] = netOpts
 if retErr != nil {
- return nil, retErr
+ return retErr
 }
- }
- var res cnitypes.Result
- res, retErr = n.cniConf.AddNetworkList(context.Background(), network.cniNet, rt)
- // Add this network to teardown opts since it is now connected.
- // Also add this if an errors was returned since we want to call teardown on this regardless.
- teardownOpts.Networks[name] = netOpts
- if retErr != nil {
- return nil, retErr
+ logrus.Debugf("cni result for container %s network %s: %v", options.ContainerID, name, res)
+ var status types.StatusBlock
+ status, retErr = CNIResultToStatus(res)
+ if retErr != nil {
+ return retErr
+ }
+ results[name] = status
 }
+ return nil
+ }
- logrus.Debugf("cni result for container %s network %s: %v", options.ContainerID, name, res)
- var status types.StatusBlock
- status, retErr = CNIResultToStatus(res)
- if retErr != nil {
- return nil, retErr
- }
- results[name] = status
+ if n.rootlessNetns != nil {
+ err = n.rootlessNetns.Setup(len(options.Networks), setup)
+ } else {
+ err = setup()
 }
- return results, nil
+ return results, err
 }
 // CNIResultToStatus convert the cni result to status block
@@ -164,7 +174,7 @@ func getRuntimeConfig(netns, conName, conID, networkName string, ports []cniPort
 // Only K8S_POD_NAME is used by dnsname to get the container name.
{"K8S_POD_NAME", conName}, }, - CapabilityArgs: map[string]interface{}{}, + CapabilityArgs: map[string]any{}, } // Propagate environment CNI_ARGS @@ -225,28 +235,39 @@ func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOption } var multiErr *multierror.Error - for name, netOpts := range options.Networks { - netOpts := netOpts - rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) - - cniConfList, newRt, err := getCachedNetworkConfig(n.cniConf, name, rt) - if err == nil { - rt = newRt - } else { - logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name) - network := n.networks[name] - if network == nil { - multiErr = multierror.Append(multiErr, fmt.Errorf("network %s: %w", name, types.ErrNoSuchNetwork)) - continue + teardown := func() error { + for name, netOpts := range options.Networks { + netOpts := netOpts + rt := getRuntimeConfig(namespacePath, options.ContainerName, options.ContainerID, name, ports, &netOpts) + + cniConfList, newRt, err := getCachedNetworkConfig(n.cniConf, name, rt) + if err == nil { + rt = newRt + } else { + logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name) + network := n.networks[name] + if network == nil { + multiErr = multierror.Append(multiErr, fmt.Errorf("network %s: %w", name, types.ErrNoSuchNetwork)) + continue + } + cniConfList = network.cniNet } - cniConfList = network.cniNet - } - err = n.cniConf.DelNetworkList(context.Background(), cniConfList, rt) - if err != nil { - multiErr = multierror.Append(multiErr, err) + err = n.cniConf.DelNetworkList(context.Background(), cniConfList, rt) + if err != nil { + multiErr = multierror.Append(multiErr, err) + } } + return nil + } + + if n.rootlessNetns != nil { + err = n.rootlessNetns.Teardown(len(options.Networks), teardown) + } else { + err = teardown() } + multiErr = multierror.Append(multiErr, err) + return multiErr.ErrorOrNil() } @@ -267,3 +288,10 @@ func getCachedNetworkConfig(cniConf *libcni.CNIConfig, name string, rt *libcni.R } return cniConfList, rt, nil } + +func (n *cniNetwork) RunInRootlessNetns(toRun func() error) error { + if n.rootlessNetns == nil { + return types.ErrNotRootlessNetns + } + return n.rootlessNetns.Run(n.lock, toRun) +} diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go index ed65921ce..6bdb34e64 100644 --- a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go +++ b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/util" + "golang.org/x/exp/slices" ) const ( @@ -220,7 +220,7 @@ func checkIfEntryExists(current HostEntry, entries HostEntries) bool { if current.IP == rm.IP { // it is enough if one of the names match, in this case we remove the full entry for _, name := range current.Names { - if util.StringInSlice(name, rm.Names) { + if slices.Contains(rm.Names, name) { return true } } diff --git a/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns.go b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns.go new file mode 100644 index 000000000..edc29f66f --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns.go @@ -0,0 +1,8 @@ +package rootlessnetns + +type NetworkBackend int + 
+const ( + Netavark NetworkBackend = iota + CNI +) diff --git a/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_freebsd.go b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_freebsd.go new file mode 100644 index 000000000..a176d2d82 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_freebsd.go @@ -0,0 +1,28 @@ +package rootlessnetns + +import ( + "errors" + + "github.com/containers/common/pkg/config" + "github.com/containers/storage/pkg/lockfile" +) + +var ErrNotSupported = errors.New("rootless netns only supported on linux") + +type Netns struct{} + +func New(dir string, backend NetworkBackend, conf *config.Config) (*Netns, error) { + return nil, ErrNotSupported +} + +func (n *Netns) Setup(nets int, toRun func() error) error { + return ErrNotSupported +} + +func (n *Netns) Teardown(nets int, toRun func() error) error { + return ErrNotSupported +} + +func (n *Netns) Run(lock *lockfile.LockFile, toRun func() error) error { + return ErrNotSupported +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go new file mode 100644 index 000000000..1531ee52e --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go @@ -0,0 +1,602 @@ +package rootlessnetns + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/common/libnetwork/pasta" + "github.com/containers/common/libnetwork/resolvconf" + "github.com/containers/common/libnetwork/slirp4netns" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/netns" + "github.com/containers/common/pkg/systemd" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/lockfile" + "github.com/hashicorp/go-multierror" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const ( + // rootlessNetnsDir is the directory name + rootlessNetnsDir = "rootless-netns" + // refCountFile file name for the ref count file + refCountFile = "ref-count" + + // rootlessNetNsConnPidFile is the name of the rootless netns slirp4netns/pasta pid file + rootlessNetNsConnPidFile = "rootless-netns-conn.pid" + + // persistentCNIDir is the directory where the CNI files are stored + persistentCNIDir = "/var/lib/cni" + + tmpfs = "tmpfs" + none = "none" + resolvConfName = "resolv.conf" +) + +type Netns struct { + // dir used for the rootless netns + dir string + // backend used for the network setup/teardown + backend NetworkBackend + + // config contains containers.conf options. + config *config.Config +} + +type rootlessNetnsError struct { + msg string + err error +} + +func (e *rootlessNetnsError) Error() string { + msg := e.msg + ": " + return fmt.Sprintf("rootless netns: %s%v", msg, e.err) +} + +func (e *rootlessNetnsError) Unwrap() error { + return e.err +} + +// wrapError wraps the error with extra context +// It will always include "rootless netns:" so the msg should not mention it again, +// msg can be empty to just include the rootless netns part. +// err must be non nil. 
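+// For example, wrapError("create netns", err) renders as
+// "rootless netns: create netns: <err>".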
+func wrapError(msg string, err error) *rootlessNetnsError {
+ return &rootlessNetnsError{
+ msg: msg,
+ err: err,
+ }
+}
+
+func New(dir string, backend NetworkBackend, conf *config.Config) (*Netns, error) {
+ netnsDir := filepath.Join(dir, rootlessNetnsDir)
+ if err := os.MkdirAll(netnsDir, 0o700); err != nil {
+ return nil, wrapError("", err)
+ }
+ return &Netns{
+ dir: netnsDir,
+ backend: backend,
+ config: conf,
+ }, nil
+}
+
+// getPath is a small wrapper around filepath.Join() to have a bit less code
+func (n *Netns) getPath(path string) string {
+ return filepath.Join(n.dir, path)
+}
+
+// getOrCreateNetns returns the rootless netns; if it created a new one the
+// returned bool is set to true.
+func (n *Netns) getOrCreateNetns() (ns.NetNS, bool, error) {
+ nsPath := n.getPath(rootlessNetnsDir)
+ nsRef, err := ns.GetNS(nsPath)
+ if err == nil {
+ // TODO check if slirp4netns is alive
+ return nsRef, false, nil
+ }
+ logrus.Debugf("Creating rootless network namespace at %q", nsPath)
+ // We have to create the netns dir again here because it is possible
+ // that cleanup() removed it.
+ if err := os.MkdirAll(n.dir, 0o700); err != nil {
+ return nil, false, wrapError("", err)
+ }
+ netns, err := netns.NewNSAtPath(nsPath)
+ if err != nil {
+ return nil, false, wrapError("create netns", err)
+ }
+ switch strings.ToLower(n.config.Network.DefaultRootlessNetworkCmd) {
+ case "", slirp4netns.BinaryName:
+ err = n.setupSlirp4netns(nsPath)
+ case pasta.BinaryName:
+ err = n.setupPasta(nsPath)
+ default:
+ err = fmt.Errorf("invalid rootless network command %q", n.config.Network.DefaultRootlessNetworkCmd)
+ }
+ return netns, true, err
+}
+
+func (n *Netns) cleanup() error {
+ if _, err := os.Stat(n.dir); err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ // dir does not exist, no need for cleanup
+ return nil
+ }
+ return err
+ }
+
+ logrus.Debug("Cleaning up rootless network namespace")
+
+ nsPath := n.getPath(rootlessNetnsDir)
+ var multiErr *multierror.Error
+ if err := netns.UnmountNS(nsPath); err != nil {
+ multiErr = multierror.Append(multiErr, err)
+ }
+ if err := n.cleanupRootlessNetns(); err != nil {
+ multiErr = multierror.Append(multiErr, wrapError("kill network process", err))
+ }
+ if err := os.RemoveAll(n.dir); err != nil {
+ multiErr = multierror.Append(multiErr, wrapError("remove rootless netns dir", err))
+ }
+
+ return multiErr.ErrorOrNil()
+}
+
+func (n *Netns) setupPasta(nsPath string) error {
+ pidPath := n.getPath(rootlessNetNsConnPidFile)
+
+ pastaOpts := pasta.SetupOptions{
+ Config: n.config,
+ Netns: nsPath,
+ ExtraOptions: []string{"--pid", pidPath},
+ }
+ if err := pasta.Setup(&pastaOpts); err != nil {
+ return fmt.Errorf("setting up Pasta: %w", err)
+ }
+
+ if systemd.RunsOnSystemd() {
+ // Treat these as fatal - if pasta failed to write a PID file something is probably wrong.
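+ // (pasta was started with "--pid" above, so the PID file should exist
+ // once pasta.Setup returned successfully.)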
+ pidfile, err := os.ReadFile(pidPath)
+ if err != nil {
+ return fmt.Errorf("unable to open pasta PID file: %w", err)
+ }
+ pid, err := strconv.Atoi(strings.TrimSpace(string(pidfile)))
+ if err != nil {
+ return fmt.Errorf("unable to decode pasta PID: %w", err)
+ }
+
+ if err := systemd.MoveRootlessNetnsSlirpProcessToUserSlice(pid); err != nil {
+ // only log this, it is not fatal but can lead to issues when running podman inside systemd units
+ logrus.Errorf("failed to move the rootless netns pasta process to the systemd user.slice: %v", err)
+ }
+ }
+
+ if err := resolvconf.New(&resolvconf.Params{
+ Path: n.getPath(resolvConfName),
+ // fake the netns since we want to filter localhost
+ Namespaces: []specs.LinuxNamespace{
+ {Type: specs.NetworkNamespace},
+ },
+ // TODO: Need a way to determine if there is a valid v6 address on any
+ // external interface of the system.
+ IPv6Enabled: false,
+ KeepHostServers: true,
+ Nameservers: []string{},
+ }); err != nil {
+ return wrapError("create resolv.conf", err)
+ }
+
+ return nil
+}
+
+func (n *Netns) setupSlirp4netns(nsPath string) error {
+ res, err := slirp4netns.Setup(&slirp4netns.SetupOptions{
+ Config: n.config,
+ ContainerID: "rootless-netns",
+ Netns: nsPath,
+ })
+ if err != nil {
+ return wrapError("start slirp4netns", err)
+ }
+ // create pid file for the slirp4netns process
+ // this is needed to kill the process in the cleanup
+ pid := strconv.Itoa(res.Pid)
+ err = os.WriteFile(n.getPath(rootlessNetNsConnPidFile), []byte(pid), 0o600)
+ if err != nil {
+ return wrapError("write slirp4netns pid file", err)
+ }
+
+ if systemd.RunsOnSystemd() {
+ // move to systemd scope to prevent systemd from killing it
+ err = systemd.MoveRootlessNetnsSlirpProcessToUserSlice(res.Pid)
+ if err != nil {
+ // only log this, it is not fatal but can lead to issues when running podman inside systemd units
+ logrus.Errorf("failed to move the rootless netns slirp4netns process to the systemd user.slice: %v", err)
+ }
+ }
+
+ // build a new resolv.conf file which uses the slirp4netns dns server address
+ resolveIP, err := slirp4netns.GetDNS(res.Subnet)
+ if err != nil {
+ return wrapError("determine default slirp4netns DNS address", err)
+ }
+
+ if err := resolvconf.New(&resolvconf.Params{
+ Path: n.getPath(resolvConfName),
+ // fake the netns since we want to filter localhost
+ Namespaces: []specs.LinuxNamespace{
+ {Type: specs.NetworkNamespace},
+ },
+ IPv6Enabled: res.IPv6,
+ KeepHostServers: true,
+ Nameservers: []string{resolveIP.String()},
+ }); err != nil {
+ return wrapError("create resolv.conf", err)
+ }
+ return nil
+}
+
+func (n *Netns) cleanupRootlessNetns() error {
+ pidFile := n.getPath(rootlessNetNsConnPidFile)
+ b, err := os.ReadFile(pidFile)
+ if err == nil {
+ var i int
+ i, err = strconv.Atoi(strings.TrimSpace(string(b)))
+ if err == nil {
+ // kill the slirp process so we do not leak it
+ err = unix.Kill(i, unix.SIGTERM)
+ if err == unix.ESRCH {
+ err = nil
+ }
+ }
+ }
+ return err
+}
+
+// mountAndMkdirDest is a convenience wrapper for mount and mkdir
+func mountAndMkdirDest(source string, target string, fstype string, flags uintptr) error {
+ if err := os.MkdirAll(target, 0o700); err != nil {
+ return wrapError("create mount point", err)
+ }
+ if err := unix.Mount(source, target, fstype, flags, ""); err != nil {
+ return wrapError(fmt.Sprintf("mount %q to %q", source, target), err)
+ }
+ return nil
+}
+
+func (n *Netns) setupMounts() error {
+ // Before we can run the given function,
+ // we have to set up all mounts correctly.
+ + // The order of the mounts is IMPORTANT. + // The idea of the extra mount ns is to make /run and /var/lib/cni writeable + // for the cni plugins but not affecting the podman user namespace. + // Because the plugins also need access to XDG_RUNTIME_DIR/netns some special setup is needed. + + // The following bind mounts are needed + // 1. XDG_RUNTIME_DIR -> XDG_RUNTIME_DIR/rootless-netns/XDG_RUNTIME_DIR + // 2. /run/systemd -> XDG_RUNTIME_DIR/rootless-netns/run/systemd (only if it exists) + // 3. XDG_RUNTIME_DIR/rootless-netns/resolv.conf -> /etc/resolv.conf or XDG_RUNTIME_DIR/rootless-netns/run/symlink/target + // 4. XDG_RUNTIME_DIR/rootless-netns/var/lib/cni -> /var/lib/cni (if /var/lib/cni does not exist, use the parent dir) + // 5. XDG_RUNTIME_DIR/rootless-netns/run -> /run + + // Create a new mount namespace, + // this must happen inside the netns thread. + err := unix.Unshare(unix.CLONE_NEWNS) + if err != nil { + return wrapError("create new mount namespace", err) + } + + xdgRuntimeDir, err := homedir.GetRuntimeDir() + if err != nil { + return fmt.Errorf("could not get runtime directory: %w", err) + } + newXDGRuntimeDir := n.getPath(xdgRuntimeDir) + // 1. Mount the netns into the new run to keep them accessible. + // Otherwise cni setup will fail because it cannot access the netns files. + err = mountAndMkdirDest(xdgRuntimeDir, newXDGRuntimeDir, none, unix.MS_BIND|unix.MS_SHARED|unix.MS_REC) + if err != nil { + return err + } + + // 2. Also keep /run/systemd if it exists. + // Many files are symlinked into this dir, for example /dev/log. + runSystemd := "/run/systemd" + _, err = os.Stat(runSystemd) + if err == nil { + newRunSystemd := n.getPath(runSystemd) + err = mountAndMkdirDest(runSystemd, newRunSystemd, none, unix.MS_BIND|unix.MS_REC) + if err != nil { + return err + } + } + + // 3. On some distros /etc/resolv.conf is symlinked to somewhere under /run. + // Because the kernel will follow the symlink before mounting, it is not + // possible to mount a file at /etc/resolv.conf. We have to ensure that + // the link target will be available in the mount ns. + // see: https://github.com/containers/podman/issues/10855 + resolvePath := resolvconf.DefaultResolvConf + linkCount := 0 + for i := 1; i < len(resolvePath); i++ { + // Do not use filepath.EvalSymlinks, we only want the first symlink under /run. + // If /etc/resolv.conf has more than one symlink under /run, e.g. + // -> /run/systemd/resolve/stub-resolv.conf -> /run/systemd/resolve/resolv.conf + // we would put the netns resolv.conf file to the last path. However this will + // break dns because the second link does not exist in the mount ns. + // see https://github.com/containers/podman/issues/11222 + // + // We also need to resolve all path components not just the last file. 
+ // see https://github.com/containers/podman/issues/12461
+
+ if resolvePath[i] != '/' {
+ // if we are at the last char we need to inc i by one because there is no final slash
+ if i == len(resolvePath)-1 {
+ i++
+ } else {
+ // not the end of path, keep going
+ continue
+ }
+ }
+ path := resolvePath[:i]
+
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return fmt.Errorf("failed to stat resolv.conf path: %w", err)
+ }
+
+ // no link, just continue
+ if fi.Mode()&os.ModeSymlink == 0 {
+ continue
+ }
+
+ link, err := os.Readlink(path)
+ if err != nil {
+ return fmt.Errorf("failed to read resolv.conf symlink: %w", err)
+ }
+ linkCount++
+ if filepath.IsAbs(link) {
+ // link is an absolute path
+ resolvePath = filepath.Join(link, resolvePath[i:])
+ } else {
+ // link is relative, join it with the previous path
+ base := filepath.Dir(path)
+ resolvePath = filepath.Join(base, link, resolvePath[i:])
+ }
+ // set i back to zero since we now have a new base path
+ i = 0
+
+ // we have to stop at the first path under /run because we will have an empty /run and will create the path anyway
+ // if we would continue we would need to recreate all links under /run
+ if strings.HasPrefix(resolvePath, "/run/") {
+ break
+ }
+ // make sure we do not loop forever
+ if linkCount == 255 {
+ return errors.New("too many symlinks while resolving /etc/resolv.conf")
+ }
+ }
+ logrus.Debugf("The path of /etc/resolv.conf in the mount ns is %q", resolvePath)
+ // When /etc/resolv.conf on the host is a symlink to /run/systemd/resolve/stub-resolv.conf,
+ // we have to mount an empty filesystem on /run/systemd/resolve in the child namespace,
+ // so as to isolate the directory from the host mount namespace.
+ //
+ // Otherwise our bind-mount for /run/systemd/resolve/stub-resolv.conf is unmounted
+ // when systemd-resolved unlinks and recreates /run/systemd/resolve/stub-resolv.conf on the host.
+ // see: https://github.com/containers/podman/issues/10929
+ if strings.HasPrefix(resolvePath, "/run/systemd/resolve/") {
+ rsr := n.getPath("/run/systemd/resolve")
+ err = mountAndMkdirDest("", rsr, tmpfs, unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV)
+ if err != nil {
+ return err
+ }
+ }
+ if strings.HasPrefix(resolvePath, "/run/") {
+ resolvePath = n.getPath(resolvePath)
+ err = os.MkdirAll(filepath.Dir(resolvePath), 0o700)
+ if err != nil {
+ return wrapError("create resolv.conf directory", err)
+ }
+ // we want to bind mount on this file so we have to create the file first
+ _, err = os.OpenFile(resolvePath, os.O_CREATE|os.O_RDONLY, 0o600)
+ if err != nil {
+ return wrapError("create resolv.conf file", err)
+ }
+ }
+ // mount resolv.conf to make use of the host dns
+ err = unix.Mount(n.getPath(resolvConfName), resolvePath, none, unix.MS_BIND, "")
+ if err != nil {
+ return wrapError(fmt.Sprintf("mount resolv.conf to %q", resolvePath), err)
+ }
+
+ // 4. CNI plugins need access to /var/lib/cni
+ if n.backend == CNI {
+ if err := n.mountCNIVarDir(); err != nil {
+ return err
+ }
+ }
+
+ // 5. Mount the new prepared run dir to /run, it has to be recursive to keep the other bind mounts.
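+ // (MS_REC keeps the mounts staged under this directory, such as the
+ // netns files from step 1, visible once it is bind-mounted over the
+ // real /run.)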
+ // 5. Mount the new prepared run dir to /run; it has to be recursive to keep the other bind mounts. + runDir := n.getPath("run") + // relabel the new run directory to the iptables /run label + // this is important; otherwise the iptables command will fail + err = label.Relabel(runDir, "system_u:object_r:iptables_var_run_t:s0", false) + if err != nil { + if !errors.Is(err, unix.ENOTSUP) { + return wrapError("relabel iptables_var_run_t", err) + } + logrus.Debugf("Labeling not supported on %q", runDir) + } + err = mountAndMkdirDest(runDir, "/run", none, unix.MS_BIND|unix.MS_REC) + if err != nil { + return err + } + return nil +} + +func (n *Netns) mountCNIVarDir() error { + varDir := "" + varTarget := persistentCNIDir + // we can only mount to a target dir that exists, so check /var/lib/cni and its parents. + // While we could always use /var, there are cases where a user might store the cni + // configs under /var/custom and this would break + for { + if _, err := os.Stat(varTarget); err == nil { + varDir = n.getPath(varTarget) + break + } + varTarget = filepath.Dir(varTarget) + if varTarget == "/" { + break + } + } + if varDir == "" { + return errors.New("failed to stat /var directory") + } + if err := os.MkdirAll(varDir, 0o700); err != nil { + return wrapError("create var dir", err) + } + // make sure to mount var first + err := unix.Mount(varDir, varTarget, none, unix.MS_BIND, "") + if err != nil { + return wrapError(fmt.Sprintf("mount %q to %q", varDir, varTarget), err) + } + return nil +} + +func (n *Netns) runInner(toRun func() error) (err error) { + nsRef, newNs, err := n.getOrCreateNetns() + if err != nil { + return err + } + defer nsRef.Close() + // If a new netns was created, make sure to clean it up again on error so we do not leak it. + if newNs { + defer func() { + if err != nil { + if err := n.cleanup(); err != nil { + logrus.Errorf("Rootless netns cleanup error after failed setup: %v", err) + } + } + }() + } + + return nsRef.Do(func(_ ns.NetNS) error { + if err := n.setupMounts(); err != nil { + return err + } + return toRun() + }) +} + +func (n *Netns) Setup(nets int, toRun func() error) error { + err := n.runInner(toRun) + if err != nil { + return err + } + _, err = refCount(n.dir, nets) + return err +} + +func (n *Netns) Teardown(nets int, toRun func() error) error { + var multiErr *multierror.Error + count, countErr := refCount(n.dir, -nets) + if countErr != nil { + multiErr = multierror.Append(multiErr, countErr) + } + err := n.runInner(toRun) + if err != nil { + multiErr = multierror.Append(multiErr, err) + } + + // only clean up if the ref count did not return an error + if count == 0 && countErr == nil { + err = n.cleanup() + if err != nil { + multiErr = multierror.Append(multiErr, wrapError("cleanup", err)) + } + } + + return multiErr.ErrorOrNil() +} +
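Setup and Teardown bracket the per-network work with the shared ref counter defined further down, so the netns and its mounts live exactly as long as at least one network uses them. A hypothetical caller inside this package (names and function bodies are illustrative, not from the diff):

```go
func attachOneNetwork(n *Netns) error {
	// Bump the counter and run the real setup inside the rootless netns.
	if err := n.Setup(1, func() error {
		// ... netavark setup would run here ...
		return nil
	}); err != nil {
		return err
	}
	// Decrement again; cleanup only happens once the count reaches zero.
	return n.Teardown(1, func() error {
		// ... netavark teardown would run here ...
		return nil
	})
}
```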
+// Run any long-running function in the userns. +// We need to ensure that during setup/cleanup we are locked to avoid races. +// However, because the given function could be running a long time, we must +// unlock in between, i.e. this is used by podman unshare --rootless-netns +// and we do not want to keep it locked for the lifetime of the given command. +func (n *Netns) Run(lock *lockfile.LockFile, toRun func() error) error { + lock.Lock() + defer lock.Unlock() + _, err := refCount(n.dir, 1) + if err != nil { + return err + } + inner := func() error { + lock.Unlock() + err = toRun() + lock.Lock() + return err + } + + inErr := n.runInner(inner) + // make sure to always reset the ref counter afterwards + count, err := refCount(n.dir, -1) + if err != nil { + if inErr == nil { + return err + } + logrus.Errorf("Failed to decrement ref count: %v", err) + return inErr + } + if count == 0 { + err = n.cleanup() + if err != nil { + err = wrapError("cleanup", err) + if inErr == nil { + return err + } + logrus.Errorf("Failed to cleanup rootless netns: %v", err) + return inErr + } + } + + return inErr +} + +func refCount(dir string, inc int) (int, error) { + file := filepath.Join(dir, refCountFile) + content, err := os.ReadFile(file) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return -1, wrapError("read ref counter", err) + } + + currentCount := 0 + if len(content) > 0 { + currentCount, err = strconv.Atoi(string(content)) + if err != nil { + return -1, wrapError("parse ref counter", err) + } + } + + currentCount += inc + if currentCount < 0 { + logrus.Errorf("rootless netns ref counter out of sync, counter is at %d, resetting it back to 0", currentCount) + currentCount = 0 + } + + newNum := strconv.Itoa(currentCount) + if err = os.WriteFile(file, []byte(newNum), 0o600); err != nil { + return -1, wrapError("write ref counter", err) + } + + return currentCount, nil +} diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go index 7197a23bf..c511a2df7 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go @@ -7,13 +7,13 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/libnetwork/util" "github.com/containers/common/pkg/config" - pkgutil "github.com/containers/common/pkg/util" + "golang.org/x/exp/slices" ) func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet, subnetPools []config.SubnetPool) error { if network.NetworkInterface != "" { bridges := GetBridgeInterfaceNames(n) - if pkgutil.StringInSlice(network.NetworkInterface, bridges) { + if slices.Contains(bridges, network.NetworkInterface) { return fmt.Errorf("bridge name %s already in use", network.NetworkInterface) } if !types.NameRegex.MatchString(network.NetworkInterface) { diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/interface.go b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go index 650fcb193..9b66e66a3 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/interface.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/interface.go @@ -7,7 +7,7 @@ import "github.com/containers/common/libnetwork/types" // NetUtil is a helper interface which all network interfaces should implement to allow easy code sharing type NetUtil interface { - // ForEach eaxecutes the given function for each network + // ForEach executes the given function for each network ForEach(func(types.Network)) // Len returns the number of networks Len() int diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/util.go b/vendor/github.com/containers/common/libnetwork/internal/util/util.go index 2ab24c563..8405bffd9 100644 ---
a/vendor/github.com/containers/common/libnetwork/internal/util/util.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/util.go @@ -7,8 +7,8 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/util" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // GetBridgeInterfaceNames returns all bridge interface names @@ -51,7 +51,7 @@ func GetFreeDeviceName(n NetUtil) (string, error) { // Start by 1, 0 is reserved for the default network for i := 1; i < 1000000; i++ { deviceName := fmt.Sprintf("%s%d", n.DefaultInterfaceName(), i) - if !util.StringInSlice(deviceName, names) { + if !slices.Contains(names, deviceName) { logrus.Debugf("found free device name %s", deviceName) return deviceName, nil } diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go index adf615552..55995440e 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go @@ -93,15 +93,15 @@ func ValidateRoutes(routes []types.Route) error { func ValidateRoute(route types.Route) error { if route.Destination.IP == nil { - return fmt.Errorf("route destination ip nil") + return errors.New("route destination ip nil") } if route.Destination.Mask == nil { - return fmt.Errorf("route destination mask nil") + return errors.New("route destination mask nil") } if route.Gateway == nil { - return fmt.Errorf("route gateway nil") + return errors.New("route gateway nil") } // Reparse to ensure destination is valid. @@ -112,7 +112,7 @@ func ValidateRoute(route types.Route) error { // check that destination is a network and not an address if !ip.Equal(ipNet.IP) { - return fmt.Errorf("route destination invalid") + return errors.New("route destination invalid") } return nil diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go index de7af9575..bcd1eaea3 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/config.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark @@ -16,14 +15,14 @@ import ( internalutil "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" - "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/stringid" + "golang.org/x/exp/slices" ) func sliceRemoveDuplicates(strList []string) []string { list := make([]string, 0, len(strList)) for _, item := range strList { - if !util.StringInSlice(item, list) { + if !slices.Contains(list, item) { list = append(list, item) } } @@ -71,7 +70,7 @@ func (n *netavarkNetwork) NetworkUpdate(name string, options types.NetworkUpdate networkDNSServersBefore := network.NetworkDNSServers networkDNSServersAfter := []string{} for _, server := range networkDNSServersBefore { - if util.StringInSlice(server, options.RemoveDNSServers) { + if slices.Contains(options.RemoveDNSServers, server) { continue } networkDNSServersAfter = append(networkDNSServersAfter, server) @@ -273,7 +272,7 @@ func createIpvlanOrMacvlan(network *types.Network) error { if err != nil { return err } - if !util.StringInSlice(network.NetworkInterface, interfaceNames) { + if !slices.Contains(interfaceNames, network.NetworkInterface) { return 
fmt.Errorf("parent interface %s does not exist", network.NetworkInterface) } } @@ -310,7 +309,7 @@ func createIpvlanOrMacvlan(network *types.Network) error { return errIpvlanNoDHCP } if len(network.Subnets) > 0 { - return fmt.Errorf("ipam driver dhcp set but subnets are set") + return errors.New("ipam driver dhcp set but subnets are set") } } @@ -319,11 +318,11 @@ func createIpvlanOrMacvlan(network *types.Network) error { switch key { case types.ModeOption: if isMacVlan { - if !util.StringInSlice(value, types.ValidMacVLANModes) { + if !slices.Contains(types.ValidMacVLANModes, value) { return fmt.Errorf("unknown macvlan mode %q", value) } } else { - if !util.StringInSlice(value, types.ValidIPVLANModes) { + if !slices.Contains(types.ValidIPVLANModes, value) { return fmt.Errorf("unknown ipvlan mode %q", value) } } @@ -473,7 +472,7 @@ func getAllPlugins(dirs []string) []string { if err == nil { for _, entry := range entries { name := entry.Name() - if !util.StringInSlice(name, plugins) { + if !slices.Contains(plugins, name) { plugins = append(plugins, name) } } diff --git a/vendor/github.com/containers/common/libnetwork/netavark/const.go b/vendor/github.com/containers/common/libnetwork/netavark/const.go index 70c50b1a8..fd975a1f8 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/const.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/const.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go index 20934a3f9..2a52a4702 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/exec.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark @@ -10,6 +9,7 @@ import ( "os" "os/exec" "strconv" + "strings" "github.com/sirupsen/logrus" ) @@ -76,9 +76,18 @@ func getRustLogEnv() string { // used to marshal the netavark output into it. This can be nil. // All errors return by this function should be of the type netavarkError // to provide a helpful error message. -func (n *netavarkNetwork) execNetavark(args []string, needPlugin bool, stdin, result interface{}) error { +func (n *netavarkNetwork) execNetavark(args []string, needPlugin bool, stdin, result any) error { // set the netavark log level to the same as the podman env := append(os.Environ(), getRustLogEnv()) + // Netavark need access to iptables in $PATH. As it turns out debian doesn't put + // /usr/sbin in $PATH for rootless users. This will break rootless networking completely. + // We might break existing users and we cannot expect everyone to change their $PATH so + // let's add /usr/sbin to $PATH ourselves. 
+ path := os.Getenv("PATH") + if !strings.Contains(path, "/usr/sbin") { + path += ":/usr/sbin" + env = append(env, "PATH="+path) + } // if we run with debug log level lets also set RUST_BACKTRACE=1 so we can get the full stack trace in case of panics if logrus.IsLevelEnabled(logrus.DebugLevel) { env = append(env, "RUST_BACKTRACE=1") @@ -86,14 +95,17 @@ func (n *netavarkNetwork) execNetavark(args []string, needPlugin bool, stdin, re if n.dnsBindPort != 0 { env = append(env, "NETAVARK_DNS_PORT="+strconv.Itoa(int(n.dnsBindPort))) } + if n.firewallDriver != "" { + env = append(env, "NETAVARK_FW="+n.firewallDriver) + } return n.execBinary(n.netavarkBinary, append(n.getCommonNetavarkOptions(needPlugin), args...), stdin, result, env) } -func (n *netavarkNetwork) execPlugin(path string, args []string, stdin, result interface{}) error { +func (n *netavarkNetwork) execPlugin(path string, args []string, stdin, result any) error { return n.execBinary(path, args, stdin, result, nil) } -func (n *netavarkNetwork) execBinary(path string, args []string, stdin, result interface{}, env []string) error { +func (n *netavarkNetwork) execBinary(path string, args []string, stdin, result any, env []string) error { stdinR, stdinW, err := os.Pipe() if err != nil { return newNetavarkError("failed to create stdin pipe", err) diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go index 65f21c1e3..b9a48d456 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark @@ -48,7 +47,7 @@ func (e *ipamError) Error() string { return msg } -func newIPAMError(cause error, msg string, args ...interface{}) *ipamError { +func newIPAMError(cause error, msg string, args ...any) *ipamError { return &ipamError{ msg: fmt.Sprintf(msg, args...), cause: cause, diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go index 0d323db28..9f4ee3135 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/network.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark @@ -12,6 +11,7 @@ import ( "strings" "time" + "github.com/containers/common/libnetwork/internal/rootlessnetns" "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" @@ -36,6 +36,9 @@ type netavarkNetwork struct { // aardvarkBinary is the path to the aardvark binary. aardvarkBinary string + // firewallDriver sets the firewall driver to use + firewallDriver string + // defaultNetwork is the name for the default network. defaultNetwork string // defaultSubnet is the default subnet for the default network. @@ -65,6 +68,9 @@ type netavarkNetwork struct { // networks is a map with loaded networks, the key is the network name networks map[string]*types.Network + + // rootlessNetns is used for the rootless network setup/teardown + rootlessNetns *rootlessnetns.Netns } type InitConfig struct { @@ -79,23 +85,12 @@ type InitConfig struct { // NetworkRunDir is where temporary files are stored, i.e.the ipam db, aardvark config NetworkRunDir string - // DefaultNetwork is the name for the default network. 
- DefaultNetwork string - // DefaultSubnet is the default subnet for the default network. - DefaultSubnet string - - // DefaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create - DefaultsubnetPools []config.SubnetPool - - // DNSBindPort is set the port to pass to netavark for aardvark - DNSBindPort uint16 - - // PluginDirs list of directories were netavark plugins are located - PluginDirs []string - // Syslog describes whenever the netavark debug output should be log to the syslog as well. // This will use logrus to do so, make sure logrus is set up to log to the syslog. Syslog bool + + // Config containers.conf options + Config *config.Config } // NewNetworkInterface creates the ContainerNetwork interface for the netavark backend. @@ -112,12 +107,12 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { return nil, err } - defaultNetworkName := conf.DefaultNetwork + defaultNetworkName := conf.Config.Network.DefaultNetwork if defaultNetworkName == "" { defaultNetworkName = types.DefaultNetworkName } - defaultSubnet := conf.DefaultSubnet + defaultSubnet := conf.Config.Network.DefaultSubnet if defaultSubnet == "" { defaultSubnet = types.DefaultSubnet } @@ -134,11 +129,19 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { return nil, err } - defaultSubnetPools := conf.DefaultsubnetPools + defaultSubnetPools := conf.Config.Network.DefaultSubnetPools if defaultSubnetPools == nil { defaultSubnetPools = config.DefaultSubnetPools } + var netns *rootlessnetns.Netns + if unshare.IsRootless() { + netns, err = rootlessnetns.New(conf.NetworkRunDir, rootlessnetns.Netavark, conf.Config) + if err != nil { + return nil, err + } + } + n := &netavarkNetwork{ networkConfigDir: conf.NetworkConfigDir, networkRunDir: conf.NetworkRunDir, @@ -146,13 +149,15 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { aardvarkBinary: conf.AardvarkBinary, networkRootless: unshare.IsRootless(), ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"), + firewallDriver: conf.Config.Network.FirewallDriver, defaultNetwork: defaultNetworkName, defaultSubnet: defaultNet, defaultsubnetPools: defaultSubnetPools, - dnsBindPort: conf.DNSBindPort, - pluginDirs: conf.PluginDirs, + dnsBindPort: conf.Config.Network.DNSBindPort, + pluginDirs: conf.Config.Network.NetavarkPluginDirs.Get(), lock: lock, syslog: conf.Syslog, + rootlessNetns: netns, } return n, nil diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go index 3df5ced05..d13e51ff8 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/run.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package netavark @@ -11,8 +10,8 @@ import ( "github.com/containers/common/libnetwork/internal/util" "github.com/containers/common/libnetwork/types" - pkgutil "github.com/containers/common/pkg/util" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) type netavarkOptions struct { @@ -72,12 +71,24 @@ func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions } result := map[string]types.StatusBlock{} - err = n.execNetavark([]string{"setup", namespacePath}, needPlugin, netavarkOpts, &result) - if err != nil { - // lets dealloc ips to prevent leaking - if err := n.deallocIPs(&options.NetworkOptions); err != nil { - logrus.Error(err) + setup := func() error { + 
err := n.execNetavark([]string{"setup", namespacePath}, needPlugin, netavarkOpts, &result) + if err != nil { + // lets dealloc ips to prevent leaking + if err := n.deallocIPs(&options.NetworkOptions); err != nil { + logrus.Error(err) + } + return err } + return nil + } + + if n.rootlessNetns != nil { + err = n.rootlessNetns.Setup(len(options.Networks), setup) + } else { + err = setup() + } + if err != nil { return nil, err } @@ -112,7 +123,16 @@ func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownO return fmt.Errorf("failed to convert net opts: %w", err) } - retErr := n.execNetavark([]string{"teardown", namespacePath}, needPlugin, netavarkOpts, nil) + var retErr error + teardown := func() error { + return n.execNetavark([]string{"teardown", namespacePath}, needPlugin, netavarkOpts, nil) + } + + if n.rootlessNetns != nil { + retErr = n.rootlessNetns.Teardown(len(options.Networks), teardown) + } else { + retErr = teardown() + } // when netavark returned an error we still free the used ips // otherwise we could end up in a state where block the ips forever @@ -154,9 +174,16 @@ func (n *netavarkNetwork) convertNetOpts(opts types.NetworkOptions) (*netavarkOp return nil, false, err } netavarkOptions.Networks[network] = net - if !pkgutil.StringInSlice(net.Driver, builtinDrivers) { + if !slices.Contains(builtinDrivers, net.Driver) { needsPlugin = true } } return &netavarkOptions, needsPlugin, nil } + +func (n *netavarkNetwork) RunInRootlessNetns(toRun func() error) error { + if n.rootlessNetns == nil { + return types.ErrNotRootlessNetns + } + return n.rootlessNetns.Run(n.lock, toRun) +} diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go index aeac8d9c1..51c1ae718 100644 --- a/vendor/github.com/containers/common/libnetwork/network/interface.go +++ b/vendor/github.com/containers/common/libnetwork/network/interface.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package network @@ -9,13 +8,10 @@ import ( "os" "path/filepath" - "github.com/containers/common/libnetwork/cni" "github.com/containers/common/libnetwork/netavark" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/machine" "github.com/containers/storage" - "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" @@ -24,8 +20,6 @@ import ( const ( // defaultNetworkBackendFileName is the file name for sentinel file to store the backend defaultNetworkBackendFileName = "defaultNetworkBackend" - // cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins - cniConfigDirRootless = "cni/net.d/" // netavarkBinary is the name of the netavark binary netavarkBinary = "netavark" @@ -53,153 +47,94 @@ func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (type } } - switch backend { - case types.Netavark: - netavarkBin, err := conf.FindHelperBinary(netavarkBinary, false) - if err != nil { - return "", nil, err - } + return backendFromType(backend, store, conf, syslog) +} - aardvarkBin, _ := conf.FindHelperBinary(aardvarkBinary, false) +func netavarkBackendFromConf(store storage.Store, conf *config.Config, syslog bool) (types.ContainerNetwork, error) { + netavarkBin, err := conf.FindHelperBinary(netavarkBinary, false) + if err != nil { + return nil, err + } - confDir := 
conf.Network.NetworkConfigDir - if confDir == "" { - confDir = getDefaultNetavarkConfigDir(store) - } + aardvarkBin, _ := conf.FindHelperBinary(aardvarkBinary, false) - // We cannot use the runroot for rootful since the network namespace is shared for all - // libpod instances they also have to share the same ipam db. - // For rootless we have our own network namespace per libpod instances, - // so this is not a problem there. - runDir := netavarkRunDir - if unshare.IsRootless() { - runDir = filepath.Join(store.RunRoot(), "networks") - } + confDir := conf.Network.NetworkConfigDir + if confDir == "" { + confDir = getDefaultNetavarkConfigDir(store) + } - netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{ - NetworkConfigDir: confDir, - NetworkRunDir: runDir, - NetavarkBinary: netavarkBin, - AardvarkBinary: aardvarkBin, - PluginDirs: conf.Network.NetavarkPluginDirs.Get(), - DefaultNetwork: conf.Network.DefaultNetwork, - DefaultSubnet: conf.Network.DefaultSubnet, - DefaultsubnetPools: conf.Network.DefaultSubnetPools, - DNSBindPort: conf.Network.DNSBindPort, - Syslog: syslog, - }) - return types.Netavark, netInt, err - case types.CNI: - netInt, err := getCniInterface(conf) - return types.CNI, netInt, err - - default: - return "", nil, fmt.Errorf("unsupported network backend %q, check network_backend in containers.conf", backend) + // We cannot use the runroot for rootful since the network namespace is shared for all + // libpod instances they also have to share the same ipam db. + // For rootless we have our own network namespace per libpod instances, + // so this is not a problem there. + runDir := netavarkRunDir + if unshare.IsRootless() { + runDir = filepath.Join(store.RunRoot(), "networks") } + + netInt, err := netavark.NewNetworkInterface(&netavark.InitConfig{ + Config: conf, + NetworkConfigDir: confDir, + NetworkRunDir: runDir, + NetavarkBinary: netavarkBin, + AardvarkBinary: aardvarkBin, + Syslog: syslog, + }) + return netInt, err } func defaultNetworkBackend(store storage.Store, conf *config.Config) (backend types.NetworkBackend, err error) { - // read defaultNetworkBackend file + err = nil + file := filepath.Join(store.GraphRoot(), defaultNetworkBackendFileName) - b, err := os.ReadFile(file) - if err == nil { - val := string(b) - if val == string(types.Netavark) { - return types.Netavark, nil - } - if val == string(types.CNI) { - return types.CNI, nil - } - return "", fmt.Errorf("unknown network backend value %q in %q", val, file) - } - // fail for all errors except ENOENT - if !errors.Is(err, os.ErrNotExist) { - return "", fmt.Errorf("could not read network backend value: %w", err) - } - // cache the network backend to make sure always the same one will be used - defer func() { + writeBackendToFile := func(backendT types.NetworkBackend) { // only write when there is no error if err == nil { - if err := ioutils.AtomicWriteFile(file, []byte(backend), 0o644); err != nil { + if err := ioutils.AtomicWriteFile(file, []byte(backendT), 0o644); err != nil { logrus.Errorf("could not write network backend to file: %v", err) } } - }() - - _, err = conf.FindHelperBinary("netavark", false) - if err != nil { - // if we cannot find netavark use CNI - return types.CNI, nil - } - - // If there are any containers then return CNI - cons, err := store.Containers() - if err != nil { - return "", err - } - if len(cons) != 0 { - return types.CNI, nil - } - - // If there are any non ReadOnly images then return CNI - imgs, err := store.Images() - if err != nil { - return "", err - } - for _, i := 
range imgs { - if !i.ReadOnly { - return types.CNI, nil - } } - // If there are CNI Networks then return CNI - cniInterface, err := getCniInterface(conf) + // read defaultNetworkBackend file + b, err := os.ReadFile(file) if err == nil { - nets, err := cniInterface.NetworkList() - // there is always a default network so check > 1 - if err != nil && !errors.Is(err, os.ErrNotExist) { - return "", err - } - - if len(nets) > 1 { - // we do not have a fresh system so use CNI - return types.CNI, nil - } - } - return types.Netavark, nil -} + val := string(b) -func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) { - confDir := conf.Network.NetworkConfigDir - if confDir == "" { - var err error - confDir, err = getDefaultCNIConfigDir() - if err != nil { - return nil, err + // if the network backend has already been set, + // handle the values depending on whether CNI is supported and + // whether the network backend is explicitly configured + if val == string(types.Netavark) { + // netavark is always good + return types.Netavark, nil + } else if val == string(types.CNI) { + if cniSupported { + return types.CNI, nil + } + // the user has *not* configured a network + // backend explicitly but used CNI in the past + // => we upgrade them to netavark in this case + writeBackendToFile(types.Netavark) + logrus.Info("Migrating network backend to netavark as no backend has been configured previously") + return types.Netavark, nil } return "", fmt.Errorf("unknown network backend value %q in %q", val, file) } - return cni.NewCNINetworkInterface(&cni.InitConfig{ - CNIConfigDir: confDir, - CNIPluginDirs: conf.Network.CNIPluginDirs.Get(), - RunDir: conf.Engine.TmpDir, - DefaultNetwork: conf.Network.DefaultNetwork, - DefaultSubnet: conf.Network.DefaultSubnet, - DefaultsubnetPools: conf.Network.DefaultsubnetPools, - IsMachine: machine.IsGvProxyBased(), - }) -} -func getDefaultCNIConfigDir() (string, error) { - if !unshare.IsRootless() { - return cniConfigDir, nil + // fail for all errors except ENOENT + if !errors.Is(err, os.ErrNotExist) { + return "", fmt.Errorf("could not read network backend value: %w", err) } - configHome, err := homedir.GetConfigHome() + backend, err = networkBackendFromStore(store, conf) if err != nil { return "", err } - return filepath.Join(configHome, cniConfigDirRootless), nil + // cache the network backend to make sure the same one will always be used + writeBackendToFile(backend) + + return backend, nil } // getDefaultNetavarkConfigDir return the netavark config dir.
For rootful it will diff --git a/vendor/github.com/containers/common/libnetwork/network/interface_cni.go b/vendor/github.com/containers/common/libnetwork/network/interface_cni.go new file mode 100644 index 000000000..0a8bc4472 --- /dev/null +++ b/vendor/github.com/containers/common/libnetwork/network/interface_cni.go @@ -0,0 +1,121 @@ +//go:build (linux || freebsd) && cni +// +build linux freebsd +// +build cni + +package network + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/containers/common/libnetwork/cni" + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/common/pkg/machine" + "github.com/containers/storage" + "github.com/containers/storage/pkg/homedir" + "github.com/containers/storage/pkg/unshare" +) + +const ( + // cniConfigDirRootless is the directory in XDG_CONFIG_HOME for cni plugins + cniConfigDirRootless = "cni/net.d/" + + cniSupported = true +) + +func getCniInterface(conf *config.Config) (types.ContainerNetwork, error) { + confDir := conf.Network.NetworkConfigDir + if confDir == "" { + var err error + confDir, err = getDefaultCNIConfigDir() + if err != nil { + return nil, err + } + } + return cni.NewCNINetworkInterface(&cni.InitConfig{ + Config: conf, + CNIConfigDir: confDir, + RunDir: conf.Engine.TmpDir, + IsMachine: machine.IsGvProxyBased(), + }) +} + +func getDefaultCNIConfigDir() (string, error) { + if !unshare.IsRootless() { + return cniConfigDir, nil + } + + configHome, err := homedir.GetConfigHome() + if err != nil { + return "", err + } + + return filepath.Join(configHome, cniConfigDirRootless), nil +} + +func networkBackendFromStore(store storage.Store, conf *config.Config) (backend types.NetworkBackend, err error) { + _, err = conf.FindHelperBinary("netavark", false) + if err != nil { + // if we cannot find netavark use CNI + return types.CNI, nil + } + + // If there are any containers then return CNI + cons, err := store.Containers() + if err != nil { + return "", err + } + if len(cons) != 0 { + return types.CNI, nil + } + + // If there are any non ReadOnly images then return CNI + imgs, err := store.Images() + if err != nil { + return "", err + } + for _, i := range imgs { + if !i.ReadOnly { + return types.CNI, nil + } + } + + // If there are CNI Networks then return CNI + cniInterface, err := getCniInterface(conf) + if err == nil { + nets, err := cniInterface.NetworkList() + // there is always a default network so check > 1 + if err != nil && !errors.Is(err, os.ErrNotExist) { + return "", err + } + + if len(nets) > 1 { + // we do not have a fresh system so use CNI + return types.CNI, nil + } + } + return types.Netavark, nil +} + +func backendFromType(backend types.NetworkBackend, store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) { + switch backend { + case types.Netavark: + netInt, err := netavarkBackendFromConf(store, conf, syslog) + if err != nil { + return "", nil, err + } + return types.Netavark, netInt, err + case types.CNI: + netInt, err := getCniInterface(conf) + if err != nil { + return "", nil, err + } + return types.CNI, netInt, err + + default: + return "", nil, fmt.Errorf("unsupported network backend %q, check network_backend in containers.conf", backend) + } +} diff --git a/vendor/github.com/containers/common/libnetwork/network/interface_cni_unsupported.go b/vendor/github.com/containers/common/libnetwork/network/interface_cni_unsupported.go new file mode 100644 index 000000000..2f4bb8371 --- /dev/null 
+++ b/vendor/github.com/containers/common/libnetwork/network/interface_cni_unsupported.go @@ -0,0 +1,32 @@ +//go:build (linux || freebsd) && !cni +// +build linux freebsd +// +build !cni + +package network + +import ( + "fmt" + + "github.com/containers/common/libnetwork/types" + "github.com/containers/common/pkg/config" + "github.com/containers/storage" +) + +const ( + cniSupported = false +) + +func networkBackendFromStore(_store storage.Store, _conf *config.Config) (backend types.NetworkBackend, err error) { + return types.Netavark, nil +} + +func backendFromType(backend types.NetworkBackend, store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) { + if backend != types.Netavark { + return "", nil, fmt.Errorf("cni support is not enabled in this build, only netavark. Got unsupported network backend %q", backend) + } + cn, err := netavarkBackendFromConf(store, conf, syslog) + if err != nil { + return "", nil, err + } + return types.Netavark, cn, err +} diff --git a/vendor/github.com/containers/common/libnetwork/pasta/pasta.go b/vendor/github.com/containers/common/libnetwork/pasta/pasta.go index b787a7814..0da7607f6 100644 --- a/vendor/github.com/containers/common/libnetwork/pasta/pasta.go +++ b/vendor/github.com/containers/common/libnetwork/pasta/pasta.go @@ -47,6 +47,7 @@ func Setup(opts *SetupOptions) error { NoTCPNamespacePorts := true NoUDPNamespacePorts := true NoMapGW := true + NoDNS := true path, err := opts.Config.FindHelperBinary(BinaryName, true) if err != nil { @@ -62,7 +63,7 @@ func Setup(opts *SetupOptions) error { var addr string if i.HostIP != "" { - addr = fmt.Sprintf("%s/", i.HostIP) + addr = i.HostIP + "/" } switch protocol { @@ -102,6 +103,8 @@ func Setup(opts *SetupOptions) error { NoMapGW = false // not an actual pasta(1) option cmdArgs = append(cmdArgs[:i], cmdArgs[i+1:]...) + case "-D", "--dns", "--dns-forward": + NoDNS = false } } @@ -120,21 +123,36 @@ func Setup(opts *SetupOptions) error { if NoMapGW { cmdArgs = append(cmdArgs, "--no-map-gw") } + if NoDNS { + // disable pasta reading from /etc/resolv.conf which hides the + // "Couldn't get any nameserver address" warning when only + // localhost resolvers are configured. + cmdArgs = append(cmdArgs, "--dns", "none") + } - cmdArgs = append(cmdArgs, "--netns", opts.Netns) + // always pass --quiet to silence the info output from pasta + cmdArgs = append(cmdArgs, "--quiet", "--netns", opts.Netns) logrus.Debugf("pasta arguments: %s", strings.Join(cmdArgs, " ")) // pasta forks once ready, and quits once we delete the target namespace - _, err = exec.Command(path, cmdArgs...).Output() + out, err := exec.Command(path, cmdArgs...).CombinedOutput() if err != nil { exitErr := &exec.ExitError{} if errors.As(err, &exitErr) { return fmt.Errorf("pasta failed with exit code %d:\n%s", - exitErr.ExitCode(), exitErr.Stderr) + exitErr.ExitCode(), string(out)) } return fmt.Errorf("failed to start pasta: %w", err) } + if len(out) > 0 { + // TODO: This should be a warning but right now pasta still prints + // things with --quiet that we do not care about. + // For now info is fine and we can bump it up later, it is only a + // nice-to-have.
+ logrus.Infof("pasta logged warnings: %q", string(out)) + } + return nil } diff --git a/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go index c451d3b49..472fb9452 100644 --- a/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go +++ b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go @@ -7,9 +7,9 @@ import ( "path/filepath" "strings" - "github.com/containers/common/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) const ( @@ -111,7 +111,7 @@ func getDefaultResolvConf(params *Params) ([]byte, bool, error) { // unsetSearchDomainsIfNeeded removes the search domain when they contain a single dot as element. func unsetSearchDomainsIfNeeded(searches []string) []string { - if util.StringInSlice(".", searches) { + if slices.Contains(searches, ".") { return nil } return searches @@ -173,7 +173,7 @@ func Remove(path string, nameservers []string) error { oldNameservers := getNameservers(contents) newNameserver := make([]string, 0, len(oldNameservers)) for _, ns := range oldNameservers { - if !util.StringInSlice(ns, nameservers) { + if !slices.Contains(nameservers, ns) { newNameserver = append(newNameserver, ns) } } diff --git a/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go b/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go index 43ca97808..d37893414 100644 --- a/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go +++ b/vendor/github.com/containers/common/libnetwork/slirp4netns/slirp4netns.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package slirp4netns @@ -211,7 +210,7 @@ func createBasicSlirpCmdArgs(options *networkOptions, features *slirpFeatures) ( cmdArgs = append(cmdArgs, "--disable-host-loopback") } if options.mtu > -1 && features.HasMTU { - cmdArgs = append(cmdArgs, fmt.Sprintf("--mtu=%d", options.mtu)) + cmdArgs = append(cmdArgs, "--mtu="+strconv.Itoa(options.mtu)) } if !options.noPivotRoot && features.HasEnableSandbox { cmdArgs = append(cmdArgs, "--enable-sandbox") @@ -222,33 +221,33 @@ func createBasicSlirpCmdArgs(options *networkOptions, features *slirpFeatures) ( if options.cidr != "" { if !features.HasCIDR { - return nil, fmt.Errorf("cidr not supported") + return nil, errors.New("cidr not supported") } - cmdArgs = append(cmdArgs, fmt.Sprintf("--cidr=%s", options.cidr)) + cmdArgs = append(cmdArgs, "--cidr="+options.cidr) } if options.enableIPv6 { if !features.HasIPv6 { - return nil, fmt.Errorf("enable_ipv6 not supported") + return nil, errors.New("enable_ipv6 not supported") } cmdArgs = append(cmdArgs, "--enable-ipv6") } if options.outboundAddr != "" { if !features.HasOutboundAddr { - return nil, fmt.Errorf("outbound_addr not supported") + return nil, errors.New("outbound_addr not supported") } - cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr=%s", options.outboundAddr)) + cmdArgs = append(cmdArgs, "--outbound-addr="+options.outboundAddr) } if options.outboundAddr6 != "" { if !features.HasOutboundAddr || !features.HasIPv6 { - return nil, fmt.Errorf("outbound_addr6 not supported") + return nil, errors.New("outbound_addr6 not supported") } if !options.enableIPv6 { - return nil, fmt.Errorf("enable_ipv6=true is required for outbound_addr6") + return nil, errors.New("enable_ipv6=true is required for outbound_addr6") } - cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr6=%s", options.outboundAddr6)) + cmdArgs = 
append(cmdArgs, "--outbound-addr6="+options.outboundAddr6) } return cmdArgs, nil @@ -301,7 +300,7 @@ func Setup(opts *SetupOptions) (*SetupResult, error) { var apiSocket string if havePortMapping && netOptions.isSlirpHostForward { - apiSocket = filepath.Join(opts.Config.Engine.TmpDir, fmt.Sprintf("%s.net", opts.ContainerID)) + apiSocket = filepath.Join(opts.Config.Engine.TmpDir, opts.ContainerID+".net") cmdArgs = append(cmdArgs, "--api-socket", apiSocket) } @@ -611,7 +610,7 @@ func SetupRootlessPortMappingViaRLK(opts *SetupOptions, slirpSubnet *net.IPNet, if stdoutStr != "" { // err contains full debug log and too verbose, so return stdoutStr logrus.Debug(err) - return fmt.Errorf("rootlessport " + strings.TrimSuffix(stdoutStr, "\n")) + return errors.New("rootlessport " + strings.TrimSuffix(stdoutStr, "\n")) } return err } @@ -706,7 +705,7 @@ func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport ui } // if there is no 'error' key in the received JSON data, then the operation was // successful. - var y map[string]interface{} + var y map[string]any if err := json.Unmarshal(buf[0:readLength], &y); err != nil { return fmt.Errorf("parsing error status from slirp4netns: %w", err) } diff --git a/vendor/github.com/containers/common/libnetwork/types/define.go b/vendor/github.com/containers/common/libnetwork/types/define.go index 6e91ccda9..193377b1a 100644 --- a/vendor/github.com/containers/common/libnetwork/types/define.go +++ b/vendor/github.com/containers/common/libnetwork/types/define.go @@ -18,6 +18,9 @@ var ( // exists. ErrNetworkExists = errors.New("network already exists") + + // ErrNotRootlessNetns indicates the rootless netns can only be used as rootless + ErrNotRootlessNetns = errors.New("rootless netns cannot be used as root") + // NameRegex is a regular expression to validate names. // This must NOT be changed. NameRegex = regexp.Delayed("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go index 94087fd37..9e30975cb 100644 --- a/vendor/github.com/containers/common/libnetwork/types/network.go +++ b/vendor/github.com/containers/common/libnetwork/types/network.go @@ -27,6 +27,10 @@ type ContainerNetwork interface { // Teardown will teardown the container network namespace. Teardown(namespacePath string, options TeardownOptions) error + + // RunInRootlessNetns is used to run the given function in the rootless netns. + // Only used as rootless and should return an error as root. + RunInRootlessNetns(toRun func() error) error + // Drivers will return the list of supported network drivers // for this interface.
Drivers() []string diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go index 70f90918c..faea9c1ad 100644 --- a/vendor/github.com/containers/common/libnetwork/util/filters.go +++ b/vendor/github.com/containers/common/libnetwork/util/filters.go @@ -7,6 +7,7 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/util" + "golang.org/x/exp/slices" ) func GenerateNetworkFilters(f map[string][]string) ([]types.FilterFunc, error) { @@ -32,7 +33,7 @@ func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, err case types.Driver: // matches network driver return func(net types.Network) bool { - return util.StringInSlice(net.Driver, filterValues) + return slices.Contains(filterValues, net.Driver) }, nil case "id": diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go index 435422c27..5cbb6ba9f 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go @@ -1,5 +1,4 @@ //go:build linux && apparmor -// +build linux,apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go index 99e0e14e2..b8212542d 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go @@ -1,5 +1,4 @@ //go:build linux && apparmor -// +build linux,apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go index dacfc2f48..6c1ce46d6 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux || !apparmor -// +build !linux !apparmor package apparmor diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go index 6536d0f2f..3b6a05f0a 100644 --- a/vendor/github.com/containers/common/pkg/auth/auth.go +++ b/vendor/github.com/containers/common/pkg/auth/auth.go @@ -285,7 +285,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user username := opts.Username if username == "" { if opts.Stdin == nil { - return "", "", fmt.Errorf("cannot prompt for username without stdin") + return "", "", errors.New("cannot prompt for username without stdin") } if userFromAuthFile != "" { diff --git a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go index 3bf25e086..e0b0ac95e 100644 --- a/vendor/github.com/containers/common/pkg/capabilities/capabilities.go +++ b/vendor/github.com/containers/common/pkg/capabilities/capabilities.go @@ -13,6 +13,7 @@ import ( "sync" "github.com/syndtr/gocapability/capability" + "golang.org/x/exp/slices" ) var ( @@ -54,16 +55,6 @@ func init() { } } -// stringInSlice determines if a string is in a string slice, returns bool -func stringInSlice(s string, sl []string) bool { - for _, i := range sl { - if i == s { - return true - } - } - return false -} - var ( boundingSetOnce 
sync.Once boundingSetRet []string @@ -115,7 +106,7 @@ func NormalizeCapabilities(caps []string) ([]string, error) { if !strings.HasPrefix(c, "CAP_") { c = "CAP_" + c } - if !stringInSlice(c, capabilityList) { + if !slices.Contains(capabilityList, c) { return nil, fmt.Errorf("%q: %w", c, ErrUnknownCapability) } normalized = append(normalized, c) @@ -127,7 +118,7 @@ func NormalizeCapabilities(caps []string) ([]string, error) { // ValidateCapabilities validates if caps only contains valid capabilities. func ValidateCapabilities(caps []string) error { for _, c := range caps { - if !stringInSlice(c, capabilityList) { + if !slices.Contains(capabilityList, c) { return fmt.Errorf("%q: %w", c, ErrUnknownCapability) } } @@ -159,8 +150,8 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { return nil, err } - if stringInSlice(All, capDrop) { - if stringInSlice(All, capAdd) { + if slices.Contains(capDrop, All) { + if slices.Contains(capAdd, All) { return nil, errors.New("adding all caps and removing all caps not allowed") } // "Drop" all capabilities; return what's in capAdd instead @@ -168,7 +159,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { return capAdd, nil } - if stringInSlice(All, capAdd) { + if slices.Contains(capAdd, All) { base, err = BoundingSet() if err != nil { return nil, err @@ -176,14 +167,14 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { capAdd = []string{} } else { for _, add := range capAdd { - if stringInSlice(add, capDrop) { + if slices.Contains(capDrop, add) { return nil, fmt.Errorf("capability %q cannot be dropped and added", add) } } } for _, drop := range capDrop { - if stringInSlice(drop, capAdd) { + if slices.Contains(capAdd, drop) { return nil, fmt.Errorf("capability %q cannot be dropped and added", drop) } } @@ -191,7 +182,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { caps := make([]string, 0, len(base)+len(capAdd)) // Drop any capabilities in capDrop that are in base for _, cap := range base { - if stringInSlice(cap, capDrop) { + if slices.Contains(capDrop, cap) { continue } caps = append(caps, cap) @@ -199,7 +190,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) { // Add any capabilities in capAdd that are not in base for _, cap := range capAdd { - if stringInSlice(cap, base) { + if slices.Contains(base, cap) { continue } caps = append(caps, cap) diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go index dd18ed565..246f2c231 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go index 7605b5006..3b5536920 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups @@ -14,6 +13,9 @@ import ( "path/filepath" "strconv" "strings" + "sync" + "syscall" + "time" "github.com/containers/storage/pkg/unshare" systemdDbus "github.com/coreos/go-systemd/v22/dbus" @@ -22,6 +24,8 @@ import ( "github.com/opencontainers/runc/libcontainer/cgroups/fs2" "github.com/opencontainers/runc/libcontainer/configs" 
"github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + "golang.org/x/sys/unix" ) var ( @@ -30,6 +34,10 @@ var ( // ErrCgroupV1Rootless means the cgroup v1 were attempted to be used in rootless environment ErrCgroupV1Rootless = errors.New("no support for CGroups V1 in rootless environments") ErrStatCgroup = errors.New("no cgroup available for gathering user statistics") + + isUnifiedOnce sync.Once + isUnified bool + isUnifiedErr error ) // CgroupControl controls a cgroup hierarchy @@ -73,12 +81,13 @@ const ( var handlers map[string]controllerHandler func init() { - handlers = make(map[string]controllerHandler) - handlers[CPU] = getCPUHandler() - handlers[CPUset] = getCpusetHandler() - handlers[Memory] = getMemoryHandler() - handlers[Pids] = getPidsHandler() - handlers[Blkio] = getBlkioHandler() + handlers = map[string]controllerHandler{ + CPU: getCPUHandler(), + CPUset: getCpusetHandler(), + Memory: getMemoryHandler(), + Pids: getPidsHandler(), + Blkio: getBlkioHandler(), + } } // getAvailableControllers get the available controllers @@ -94,7 +103,7 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) } // userSlice already contains '/' so not adding here basePath := cgroupRoot + userSlice - controllersFile = fmt.Sprintf("%s/cgroup.controllers", basePath) + controllersFile = basePath + "/cgroup.controllers" } controllersFileBytes, err := os.ReadFile(controllersFile) if err != nil { @@ -380,7 +389,7 @@ func Load(path string) (*CgroupControl, error) { // CreateSystemdUnit creates the systemd cgroup func (c *CgroupControl) CreateSystemdUnit(path string) error { if !c.systemd { - return fmt.Errorf("the cgroup controller is not using systemd") + return errors.New("the cgroup controller is not using systemd") } conn, err := systemdDbus.NewWithContext(context.TODO()) @@ -395,7 +404,7 @@ func (c *CgroupControl) CreateSystemdUnit(path string) error { // CreateSystemdUserUnit creates the systemd cgroup for the specified user func (c *CgroupControl) CreateSystemdUserUnit(path string, uid int) error { if !c.systemd { - return fmt.Errorf("the cgroup controller is not using systemd") + return errors.New("the cgroup controller is not using systemd") } conn, err := UserConnection(uid) @@ -492,10 +501,7 @@ func (c *CgroupControl) AddPid(pid int) error { return fs2.CreateCgroupPath(path, c.config) } - names := make([]string, 0, len(handlers)) - for n := range handlers { - names = append(names, n) - } + names := maps.Keys(handlers) for _, c := range c.additionalControllers { if !c.symlink { @@ -672,7 +678,7 @@ func cpusetCopyFileFromParent(dir, file string, cgroupv2 bool) ([]byte, error) { path := filepath.Join(dir, file) parentPath := path if cgroupv2 { - parentPath = fmt.Sprintf("%s.effective", parentPath) + parentPath += ".effective" } data, err := os.ReadFile(parentPath) if err != nil { @@ -733,3 +739,139 @@ func SystemCPUUsage() (uint64, error) { } return total, nil } + +// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. 
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode. +func IsCgroup2UnifiedMode() (bool, error) { + isUnifiedOnce.Do(func() { + var st syscall.Statfs_t + if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { + isUnified, isUnifiedErr = false, err + } else { + isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil + } + }) + return isUnified, isUnifiedErr +} + +// UserConnection returns a user connection to D-BUS +func UserConnection(uid int) (*systemdDbus.Conn, error) { + return systemdDbus.NewConnection(func() (*dbus.Conn, error) { + return dbusAuthConnection(uid, dbus.SessionBusPrivateNoAutoStartup) + }) +} + +// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the +// current cgroup. +func UserOwnsCurrentSystemdCgroup() (bool, error) { + uid := os.Geteuid() + + cgroup2, err := IsCgroup2UnifiedMode() + if err != nil { + return false, err + } + + f, err := os.Open("/proc/self/cgroup") + if err != nil { + return false, err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.SplitN(line, ":", 3) + + if len(parts) < 3 { + continue + } + + var cgroupPath string + + if cgroup2 { + cgroupPath = filepath.Join(cgroupRoot, parts[2]) + } else { + if parts[1] != "name=systemd" { + continue + } + cgroupPath = filepath.Join(cgroupRoot, "systemd", parts[2]) + } + + st, err := os.Stat(cgroupPath) + if err != nil { + return false, err + } + s := st.Sys() + if s == nil { + return false, fmt.Errorf("stat cgroup path %s", cgroupPath) + } + + if int(s.(*syscall.Stat_t).Uid) != uid { + return false, nil + } + } + if err := scanner.Err(); err != nil { + return false, fmt.Errorf("parsing file /proc/self/cgroup: %w", err) + } + return true, nil +} +
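For reference, the /proc/self/cgroup records that UserOwnsCurrentSystemdCgroup scans have a hierarchy-ID:controller-list:path shape; a tiny standalone parse with illustrative sample lines (values made up):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	samples := []string{
		"0::/user.slice/user-1000.slice/session-3.scope", // cgroup v2: empty controller list
		"1:name=systemd:/user.slice/user-1000.slice",     // cgroup v1 systemd hierarchy
	}
	for _, line := range samples {
		parts := strings.SplitN(line, ":", 3)
		if len(parts) < 3 {
			continue
		}
		fmt.Printf("controllers=%q path=%q\n", parts[1], parts[2])
	}
}
```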
+// rmDirRecursively deletes a cgroup directory recursively. +// It differs from os.RemoveAll as it doesn't attempt to unlink files. +// On cgroupfs we are allowed only to rmdir empty directories. +func rmDirRecursively(path string) error { + killProcesses := func(signal syscall.Signal) { + if signal == unix.SIGKILL { + if err := os.WriteFile(filepath.Join(path, "cgroup.kill"), []byte("1"), 0o600); err == nil { + return + } + } + // kill all the processes that are still part of the cgroup + if procs, err := os.ReadFile(filepath.Join(path, "cgroup.procs")); err == nil { + for _, pidS := range strings.Split(string(procs), "\n") { + if pid, err := strconv.Atoi(pidS); err == nil { + _ = unix.Kill(pid, signal) + } + } + } + } + + if err := os.Remove(path); err == nil || errors.Is(err, os.ErrNotExist) { + return nil + } + entries, err := os.ReadDir(path) + if err != nil { + return err + } + for _, i := range entries { + if i.IsDir() { + if err := rmDirRecursively(filepath.Join(path, i.Name())); err != nil { + return err + } + } + } + + attempts := 0 + for { + err := os.Remove(path) + if err == nil || errors.Is(err, os.ErrNotExist) { + return nil + } + if errors.Is(err, unix.EBUSY) { + // send a SIGTERM after 3 seconds + if attempts == 300 { + killProcesses(unix.SIGTERM) + } + // send SIGKILL after 8 seconds + if attempts == 800 { + killProcesses(unix.SIGKILL) + } + // give up after 10 seconds + if attempts < 1000 { + time.Sleep(time.Millisecond * 10) + attempts++ + continue + } + } + return fmt.Errorf("remove %s: %w", path, err) + } +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go deleted file mode 100644 index 5c0cac642..000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go +++ /dev/null @@ -1,163 +0,0 @@ -//go:build linux -// +build linux - -package cgroups - -import ( - "bufio" - "errors" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" - "golang.org/x/sys/unix" -) - -var ( - isUnifiedOnce sync.Once - isUnified bool - isUnifiedErr error -) - -// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. -func IsCgroup2UnifiedMode() (bool, error) { - isUnifiedOnce.Do(func() { - var st syscall.Statfs_t - if err := syscall.Statfs("/sys/fs/cgroup", &st); err != nil { - isUnified, isUnifiedErr = false, err - } else { - isUnified, isUnifiedErr = st.Type == unix.CGROUP2_SUPER_MAGIC, nil - } - }) - return isUnified, isUnifiedErr -} - -// UserConnection returns an user connection to D-BUS -func UserConnection(uid int) (*systemdDbus.Conn, error) { - return systemdDbus.NewConnection(func() (*dbus.Conn, error) { - return dbusAuthConnection(uid, dbus.SessionBusPrivateNoAutoStartup) - }) -} - -// UserOwnsCurrentSystemdCgroup checks whether the current EUID owns the -// current cgroup.
-func UserOwnsCurrentSystemdCgroup() (bool, error) { - uid := os.Geteuid() - - cgroup2, err := IsCgroup2UnifiedMode() - if err != nil { - return false, err - } - - f, err := os.Open("/proc/self/cgroup") - if err != nil { - return false, err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, ":", 3) - - if len(parts) < 3 { - continue - } - - var cgroupPath string - - if cgroup2 { - cgroupPath = filepath.Join(cgroupRoot, parts[2]) - } else { - if parts[1] != "name=systemd" { - continue - } - cgroupPath = filepath.Join(cgroupRoot, "systemd", parts[2]) - } - - st, err := os.Stat(cgroupPath) - if err != nil { - return false, err - } - s := st.Sys() - if s == nil { - return false, fmt.Errorf("stat cgroup path %s", cgroupPath) - } - - if int(s.(*syscall.Stat_t).Uid) != uid { - return false, nil - } - } - if err := scanner.Err(); err != nil { - return false, fmt.Errorf("parsing file /proc/self/cgroup: %w", err) - } - return true, nil -} - -// rmDirRecursively delete recursively a cgroup directory. -// It differs from os.RemoveAll as it doesn't attempt to unlink files. -// On cgroupfs we are allowed only to rmdir empty directories. -func rmDirRecursively(path string) error { - killProcesses := func(signal syscall.Signal) { - if signal == unix.SIGKILL { - if err := os.WriteFile(filepath.Join(path, "cgroup.kill"), []byte("1"), 0o600); err == nil { - return - } - } - // kill all the processes that are still part of the cgroup - if procs, err := os.ReadFile(filepath.Join(path, "cgroup.procs")); err == nil { - for _, pidS := range strings.Split(string(procs), "\n") { - if pid, err := strconv.Atoi(pidS); err == nil { - _ = unix.Kill(pid, signal) - } - } - } - } - - if err := os.Remove(path); err == nil || errors.Is(err, os.ErrNotExist) { - return nil - } - entries, err := os.ReadDir(path) - if err != nil { - return err - } - for _, i := range entries { - if i.IsDir() { - if err := rmDirRecursively(filepath.Join(path, i.Name())); err != nil { - return err - } - } - } - - attempts := 0 - for { - err := os.Remove(path) - if err == nil || errors.Is(err, os.ErrNotExist) { - return nil - } - if errors.Is(err, unix.EBUSY) { - // send a SIGTERM after 3 second - if attempts == 300 { - killProcesses(unix.SIGTERM) - } - // send SIGKILL after 8 seconds - if attempts == 800 { - killProcesses(unix.SIGKILL) - } - // give up after 10 seconds - if attempts < 1000 { - time.Sleep(time.Millisecond * 10) - attempts++ - continue - } - } - return fmt.Errorf("remove %s: %w", path, err) - } -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go index f2558728d..160291212 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go @@ -1,13 +1,9 @@ //go:build !linux -// +build !linux package cgroups import ( - "fmt" "os" - - systemdDbus "github.com/coreos/go-systemd/v22/dbus" ) // IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. 
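The retry loop in the relocated rmDirRecursively polls every 10ms, so the attempt counts encode a wall-clock escalation: SIGTERM at roughly 3 seconds (attempt 300), SIGKILL at roughly 8 seconds (attempt 800, preferring cgroup v2's cgroup.kill file when available), and a hard failure at roughly 10 seconds. A condensed sketch of that schedule (illustrative; removeBusy and the kill callback are hypothetical names, not part of this change):

```go
package cgroupdemo

import (
	"errors"
	"os"
	"syscall"
	"time"

	"golang.org/x/sys/unix"
)

// removeBusy retries an operation that can fail with EBUSY, escalating on
// the same schedule as rmDirRecursively: SIGTERM near 3s, SIGKILL near 8s,
// and a final error near 10s (1000 attempts at 10ms each).
func removeBusy(remove func() error, kill func(sig syscall.Signal)) error {
	for attempts := 0; ; attempts++ {
		err := remove()
		if err == nil || errors.Is(err, os.ErrNotExist) {
			return nil
		}
		if !errors.Is(err, unix.EBUSY) || attempts >= 1000 {
			return err
		}
		switch attempts {
		case 300:
			kill(unix.SIGTERM) // ~3s in: politely ask lingering processes to exit
		case 800:
			kill(unix.SIGKILL) // ~8s in: force-kill whatever is still around
		}
		time.Sleep(10 * time.Millisecond)
	}
}
```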
@@ -24,8 +20,3 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) { func rmDirRecursively(path string) error { return os.RemoveAll(path) } - -// UserConnection returns an user connection to D-BUS -func UserConnection(uid int) (*systemdDbus.Conn, error) { - return nil, fmt.Errorf("systemd d-bus is not supported on this platform") -} diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go index 4931be6ef..c10df7968 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cpu_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups diff --git a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go index c55c76864..28b46fb25 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cpuset_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go b/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go index 3335cdffe..b82945c5b 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups diff --git a/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go b/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go index a8163ce46..61ee85394 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/pids_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package cgroups diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd.go b/vendor/github.com/containers/common/pkg/cgroups/systemd.go deleted file mode 100644 index 80a7bde28..000000000 --- a/vendor/github.com/containers/common/pkg/cgroups/systemd.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build !linux -// +build !linux - -package cgroups - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" -) - -func systemdCreate(path string, c *systemdDbus.Conn) error { - slice, name := filepath.Split(path) - slice = strings.TrimSuffix(slice, "/") - - var lastError error - for i := 0; i < 2; i++ { - properties := []systemdDbus.Property{ - systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), - systemdDbus.PropWants(slice), - } - pMap := map[string]bool{ - "DefaultDependencies": false, - "MemoryAccounting": true, - "CPUAccounting": true, - "BlockIOAccounting": true, - } - if i == 0 { - pMap["Delegate"] = true - } - for k, v := range pMap { - p := systemdDbus.Property{ - Name: k, - Value: dbus.MakeVariant(v), - } - properties = append(properties, p) - } - - ch := make(chan string) - _, err := c.StartTransientUnitContext(context.TODO(), name, "replace", properties, ch) - if err != nil { - lastError = err - continue - } - <-ch - return nil - } - return lastError -} - -/* -systemdDestroyConn is copied from containerd/cgroups/systemd.go file, that -has the following license: -Copyright The containerd Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-func systemdDestroyConn(path string, c *systemdDbus.Conn) error {
-	name := filepath.Base(path)
-
-	ch := make(chan string)
-	_, err := c.StopUnitContext(context.TODO(), name, "replace", ch)
-	if err != nil {
-		return err
-	}
-	<-ch
-	return nil
-}
diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
index 906c716d1..cde2d596b 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go
@@ -1,5 +1,4 @@
 //go:build linux
-// +build linux
 
 package cgroups
 
@@ -26,7 +25,7 @@ func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Con
 	var lastError error
 	for i := 0; i < 2; i++ {
 		properties := []systemdDbus.Property{
-			systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)),
+			systemdDbus.PropDescription("cgroup " + name),
 			systemdDbus.PropWants(slice),
 		}
 		var ioString string
diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go
index ed9f0761d..be9d11584 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go
@@ -1,9 +1,9 @@
 //go:build linux
-// +build linux
 
 package cgroups
 
 import (
+	"bufio"
 	"bytes"
 	"errors"
 	"fmt"
@@ -11,6 +11,7 @@ import (
 	"path"
 	"path/filepath"
 	"strings"
+	"sync"
 
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/opencontainers/runc/libcontainer/configs"
@@ -143,3 +144,171 @@ func SetBlkioThrottle(res *configs.Resources, cgroupPath string) error {
 	}
 	return nil
 }
+
+// Code below was moved from podman/utils/utils_supported.go and should probably
+// be better integrated here as some parts may be redundant.
+
+func getCgroupProcess(procFile string, allowRoot bool) (string, error) {
+	f, err := os.Open(procFile)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	cgroup := ""
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.SplitN(line, ":", 3)
+		if len(parts) != 3 {
+			return "", fmt.Errorf("cannot parse cgroup line %q", line)
+		}
+		if strings.HasPrefix(line, "0::") {
+			cgroup = line[3:]
+			break
+		}
+		if len(parts[2]) > len(cgroup) {
+			cgroup = parts[2]
+		}
+	}
+	if len(cgroup) == 0 || (!allowRoot && cgroup == "/") {
+		return "", fmt.Errorf("could not find cgroup mount in %q", procFile)
+	}
+	return cgroup, nil
+}
+
+// GetOwnCgroup returns the cgroup for the current process.
+func GetOwnCgroup() (string, error) {
+	return getCgroupProcess("/proc/self/cgroup", true)
+}
+
+func GetOwnCgroupDisallowRoot() (string, error) {
+	return getCgroupProcess("/proc/self/cgroup", false)
+}
+
+// GetCgroupProcess returns the cgroup for the specified process.
+func GetCgroupProcess(pid int) (string, error) {
+	return getCgroupProcess(fmt.Sprintf("/proc/%d/cgroup", pid), true)
+}
+
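getCgroupProcess prefers the cgroup v2 entry (the line starting with 0::) and otherwise falls back to the longest v1 path it sees. A self-contained sketch of that selection logic against a sample /proc/self/cgroup payload (pickCgroup is a hypothetical name used only for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// pickCgroup loosely mimics getCgroupProcess: the cgroup v2 entry ("0::...")
// wins outright; otherwise the longest v1 path encountered is kept.
func pickCgroup(procCgroup string) string {
	cgroup := ""
	for _, line := range strings.Split(strings.TrimSpace(procCgroup), "\n") {
		parts := strings.SplitN(line, ":", 3)
		if len(parts) != 3 {
			continue // getCgroupProcess returns an error here instead
		}
		if strings.HasPrefix(line, "0::") {
			return line[3:]
		}
		if len(parts[2]) > len(cgroup) {
			cgroup = parts[2]
		}
	}
	return cgroup
}

func main() {
	sample := "12:pids:/user.slice\n0::/user.slice/session-3.scope\n"
	fmt.Println(pickCgroup(sample)) // prints /user.slice/session-3.scope
}
```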
+// MoveUnderCgroupSubtree moves the PID under a cgroup subtree.
+func MoveUnderCgroupSubtree(subtree string) error {
+	return MoveUnderCgroup("", subtree, nil)
+}
+
+// MoveUnderCgroup moves a group of processes to a new cgroup.
+// If cgroup is the empty string, then the current calling process cgroup is used.
+// If processes is empty, then the processes from the current cgroup are moved.
+func MoveUnderCgroup(cgroup, subtree string, processes []uint32) error {
+	procFile := "/proc/self/cgroup"
+	f, err := os.Open(procFile)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	unifiedMode, err := IsCgroup2UnifiedMode()
+	if err != nil {
+		return err
+	}
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.SplitN(line, ":", 3)
+		if len(parts) != 3 {
+			return fmt.Errorf("cannot parse cgroup line %q", line)
+		}
+
+		// root cgroup, skip it
+		if parts[2] == "/" && !(unifiedMode && parts[1] == "") {
+			continue
+		}
+
+		cgroupRoot := "/sys/fs/cgroup"
+		// Special case the unified mount on hybrid cgroup and named hierarchies.
+		// This works on Fedora 31, but we should really parse the mounts to see
+		// where the cgroup hierarchy is mounted.
+		if parts[1] == "" && !unifiedMode {
+			// If it is not using unified mode, the cgroup v2 hierarchy is
+			// usually mounted under /sys/fs/cgroup/unified
+			cgroupRoot = filepath.Join(cgroupRoot, "unified")
+
+			// Ignore the unified mount if it doesn't exist
+			if _, err := os.Stat(cgroupRoot); err != nil && os.IsNotExist(err) {
+				continue
+			}
+		} else if parts[1] != "" {
+			// Assume the controller is mounted at /sys/fs/cgroup/$CONTROLLER.
+			controller := strings.TrimPrefix(parts[1], "name=")
+			cgroupRoot = filepath.Join(cgroupRoot, controller)
+		}
+
+		parentCgroup := cgroup
+		if parentCgroup == "" {
+			parentCgroup = parts[2]
+		}
+		newCgroup := filepath.Join(cgroupRoot, parentCgroup, subtree)
+		if err := os.MkdirAll(newCgroup, 0o755); err != nil && !os.IsExist(err) {
+			return err
+		}
+
+		f, err := os.OpenFile(filepath.Join(newCgroup, "cgroup.procs"), os.O_RDWR, 0o755)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+
+		if len(processes) > 0 {
+			for _, pid := range processes {
+				if _, err := f.WriteString(fmt.Sprintf("%d\n", pid)); err != nil {
+					logrus.Debugf("Cannot move process %d to cgroup %q: %v", pid, newCgroup, err)
+				}
+			}
+		} else {
+			processesData, err := os.ReadFile(filepath.Join(cgroupRoot, parts[2], "cgroup.procs"))
+			if err != nil {
+				return err
+			}
+			for _, pid := range bytes.Split(processesData, []byte("\n")) {
+				if len(pid) == 0 {
+					continue
+				}
+				if _, err := f.Write(pid); err != nil {
+					logrus.Debugf("Cannot move process %s to cgroup %q: %v", string(pid), newCgroup, err)
+				}
+			}
+		}
+	}
+	return nil
+}
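MoveUnderCgroup works by creating the target cgroup directory and writing PIDs into its cgroup.procs file; this is also the mechanism MaybeMoveToSubCgroup, defined just below, relies on to escape the root cgroup, where cgroup v2's no-internal-processes rule would otherwise prevent controller delegation. A toy standalone version of that mechanism for the calling process (moveSelf is illustrative only; it assumes cgroup v2 mounted at /sys/fs/cgroup and enough privilege to create cgroups):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// moveSelf is a toy version of the cgroup.procs dance in MoveUnderCgroup:
// create the target cgroup and write our own PID into its cgroup.procs.
func moveSelf(subtree string) error {
	target := filepath.Join("/sys/fs/cgroup", subtree)
	if err := os.MkdirAll(target, 0o755); err != nil {
		return err
	}
	f, err := os.OpenFile(filepath.Join(target, "cgroup.procs"), os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	defer f.Close()
	// The kernel moves the writing process as soon as the PID is written.
	_, err = fmt.Fprintf(f, "%d\n", os.Getpid())
	return err
}

func main() {
	if err := moveSelf("demo"); err != nil {
		fmt.Println("move failed:", err)
	}
}
```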
+
+var (
+	maybeMoveToSubCgroupSync    sync.Once
+	maybeMoveToSubCgroupSyncErr error
+)
+
+// MaybeMoveToSubCgroup moves the current process into a sub cgroup when
+// it is running in the root cgroup on a system that uses cgroupv2.
+func MaybeMoveToSubCgroup() error {
+	maybeMoveToSubCgroupSync.Do(func() {
+		unifiedMode, err := IsCgroup2UnifiedMode()
+		if err != nil {
+			maybeMoveToSubCgroupSyncErr = err
+			return
+		}
+		if !unifiedMode {
+			maybeMoveToSubCgroupSyncErr = nil
+			return
+		}
+		cgroup, err := GetOwnCgroup()
+		if err != nil {
+			maybeMoveToSubCgroupSyncErr = err
+			return
+		}
+		if cgroup == "/" {
+			maybeMoveToSubCgroupSyncErr = MoveUnderCgroupSubtree("init")
+		}
+	})
+	return maybeMoveToSubCgroupSyncErr
+}
diff --git a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go
index f61bd3bb2..56269aa42 100644
--- a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package cgroupv2
diff --git a/vendor/github.com/containers/common/pkg/chown/chown_unix.go b/vendor/github.com/containers/common/pkg/chown/chown_unix.go
index be4b8cfa5..b598ac920 100644
--- a/vendor/github.com/containers/common/pkg/chown/chown_unix.go
+++ b/vendor/github.com/containers/common/pkg/chown/chown_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 package chown
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 75b917f01..15c91860c 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -9,16 +9,14 @@ import (
 	"runtime"
 	"strings"
 
-	"github.com/BurntSushi/toml"
 	"github.com/containers/common/internal/attributedstring"
 	"github.com/containers/common/libnetwork/types"
 	"github.com/containers/common/pkg/capabilities"
-	"github.com/containers/common/pkg/util"
-	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/unshare"
 	units "github.com/docker/go-units"
 	selinux "github.com/opencontainers/selinux/go-selinux"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/exp/slices"
 )
 
 const (
@@ -31,7 +29,7 @@ const (
 	bindirPrefix = "$BINDIR"
 )
 
-var validImageVolumeModes = []string{_typeBind, "tmpfs", "ignore"}
+var validImageVolumeModes = []string{"anonymous", "tmpfs", "ignore"}
 
 // ProxyEnv is a list of Proxy Environment variables
 var ProxyEnv = []string{
@@ -154,6 +152,13 @@ type ContainersConfig struct {
 	// Deprecated: Do not use this field directly use conf.FindInitBinary() instead.
 	InitPath string `toml:"init_path,omitempty"`
 
+	// InterfaceName tells container runtimes how to set interface names
+	// inside containers.
+	// The only valid value at the moment is "device", which indicates that
+	// the interface name should be set to the network_interface name from
+	// the network config.
+	InterfaceName string `toml:"interface_name,omitempty"`
+
 	// IPCNS way to create a ipc namespace for the container
 	IPCNS string `toml:"ipcns,omitempty"`
 
@@ -365,11 +370,6 @@ type EngineConfig struct {
 	// LockType is the type of locking to use.
 	LockType string `toml:"lock_type,omitempty"`
 
-	// MachineEnabled indicates if Podman is running in a podman-machine VM
-	//
-	// This method is soft deprecated, use machine.IsPodmanMachine instead
-	MachineEnabled bool `toml:"machine_enabled,omitempty"`
-
 	// MultiImageArchive - if true, the container engine allows for storing
 	// archives (e.g., of the docker-archive transport) with multiple
 	// images. By default, Podman creates single-image archives.
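One behavioral note from the config.go hunk above: the built-in image-volume mode formerly spelled "bind" is now "anonymous", and validation later in this diff switches from the deprecated util.StringInSlice to golang.org/x/exp/slices. A tiny illustrative sketch of that validation style (validMode is a hypothetical helper, not the library's API):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

var validImageVolumeModes = []string{"anonymous", "tmpfs", "ignore"}

// validMode mirrors the slices.Contains check ValidateImageVolumeMode now
// uses; an empty mode is allowed and means "use the configured default".
func validMode(mode string) bool {
	return mode == "" || slices.Contains(validImageVolumeModes, mode)
}

func main() {
	for _, m := range []string{"anonymous", "bind", ""} {
		fmt.Printf("%-12q valid: %v\n", m, validMode(m))
	}
}
```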
@@ -416,6 +416,14 @@ type EngineConfig struct { // Indicates whether the application should be running in Remote mode Remote bool `toml:"remote,omitempty"` + // Number of times to retry pulling/pushing images in case of failure + Retry uint `toml:"retry,omitempty"` + + // Delay between retries in case pulling/pushing image fails + // If set, container engines will retry at the set interval, + // otherwise they delay 2 seconds and then exponentially back off. + RetryDelay string `toml:"retry_delay,omitempty"` + // RemoteURI is deprecated, see ActiveService // RemoteURI containers connection information used to connect to remote system. RemoteURI string `toml:"remote_uri,omitempty"` @@ -567,6 +575,9 @@ type NetworkConfig struct { // NetavarkPluginDirs is a list of directories which contain netavark plugins. NetavarkPluginDirs attributedstring.Slice `toml:"netavark_plugin_dirs,omitempty"` + // FirewallDriver is the firewall driver to be used + FirewallDriver string `toml:"firewall_driver,omitempty"` + // DefaultNetwork is the network name of the default network // to attach pods to. DefaultNetwork string `toml:"default_network,omitempty"` @@ -652,14 +663,16 @@ type MachineConfig struct { Volumes attributedstring.Slice `toml:"volumes,omitempty"` // Provider is the virtualization provider used to run podman-machine VM Provider string `toml:"provider,omitempty"` + // Rosetta is the flag to enable Rosetta in the podman-machine VM on Apple Silicon + Rosetta bool `toml:"rosetta,omitempty"` } // FarmConfig represents the "farm" TOML config tables type FarmConfig struct { // Default is the default farm to be used when farming out builds - Default string `toml:"default,omitempty"` + Default string `json:",omitempty" toml:"default,omitempty"` // List is a map of farms created where key=farm-name and value=list of connections - List map[string][]string `toml:"list,omitempty"` + List map[string][]string `json:",omitempty" toml:"list,omitempty"` } // Destination represents destination for remote service @@ -668,10 +681,10 @@ type Destination struct { URI string `toml:"uri"` // Identity file with ssh key, optional - Identity string `toml:"identity,omitempty"` + Identity string `json:",omitempty" toml:"identity,omitempty"` // isMachine describes if the remote destination is a machine. 
- IsMachine bool `toml:"is_machine,omitempty"` + IsMachine bool `json:",omitempty" toml:"is_machine,omitempty"` } // Consumes container image's os and arch and returns if any dedicated runtime was @@ -811,6 +824,10 @@ func (c *ContainersConfig) Validate() error { return err } + if err := c.validateInterfaceName(); err != nil { + return err + } + if err := c.validateTZ(); err != nil { return err } @@ -915,7 +932,7 @@ func (c *Config) GetDefaultEnvEx(envHost, httpProxy bool) []string { } // Capabilities returns the capabilities parses the Add and Drop capability -// list from the default capabiltiies for the container +// list from the default capabilities for the container func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []string) ([]string, error) { userNotRoot := func(user string) bool { if user == "" || user == "root" || user == "0" { @@ -994,82 +1011,6 @@ func IsValidDeviceMode(mode string) bool { return true } -func rootlessConfigPath() (string, error) { - if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" { - return filepath.Join(configHome, _configPath), nil - } - home, err := unshare.HomeDir() - if err != nil { - return "", err - } - - return filepath.Join(home, UserOverrideContainersConfig), nil -} - -func Path() string { - if path := os.Getenv("CONTAINERS_CONF"); path != "" { - return path - } - if unshare.IsRootless() { - if rpath, err := rootlessConfigPath(); err == nil { - return rpath - } - return "$HOME/" + UserOverrideContainersConfig - } - return OverrideContainersConfig -} - -// ReadCustomConfig reads the custom config and only generates a config based on it -// If the custom config file does not exists, function will return an empty config -func ReadCustomConfig() (*Config, error) { - path, err := customConfigFile() - if err != nil { - return nil, err - } - newConfig := &Config{} - if _, err := os.Stat(path); err == nil { - if err := readConfigFromFile(path, newConfig); err != nil { - return nil, err - } - } else { - if !errors.Is(err, os.ErrNotExist) { - return nil, err - } - } - // Let's always initialize the farm list so it is never nil - if newConfig.Farms.List == nil { - newConfig.Farms.List = make(map[string][]string) - } - return newConfig, nil -} - -// Write writes the configuration to the default file -func (c *Config) Write() error { - var err error - path, err := customConfigFile() - if err != nil { - return err - } - if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { - return err - } - - opts := &ioutils.AtomicFileWriterOptions{ExplicitCommit: true} - configFile, err := ioutils.NewAtomicFileWriterWithOpts(path, 0o644, opts) - if err != nil { - return err - } - defer configFile.Close() - - enc := toml.NewEncoder(configFile) - if err := enc.Encode(c); err != nil { - return err - } - - // If no errors commit the changes to the config file - return configFile.Commit() -} - // Reload clean the cached config and reloads the configuration from containers.conf files // This function is meant to be used for long-running processes that need to reload potential changes made to // the cached containers.conf files. 
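Among the additions in this config.go hunk range are the new retry and retry_delay engine options: engines retry three times by default, waiting a fixed retry_delay between attempts when one is set, and otherwise starting at 2 seconds and backing off exponentially. An illustrative sketch of those documented semantics (pullWithRetry is a hypothetical helper, not the engine's actual code):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pullWithRetry sketches the documented retry semantics: up to `retries`
// additional attempts, with a fixed delay when retry_delay is configured,
// otherwise 2s doubling after each failed attempt.
func pullWithRetry(pull func() error, retries uint, delay time.Duration) error {
	fixed := delay > 0
	if !fixed {
		delay = 2 * time.Second
	}
	var err error
	for attempt := uint(0); ; attempt++ {
		if err = pull(); err == nil || attempt >= retries {
			return err
		}
		fmt.Printf("attempt %d failed (%v); retrying in %s\n", attempt+1, err, delay)
		time.Sleep(delay)
		if !fixed {
			delay *= 2 // exponential back-off when no retry_delay is set
		}
	}
}

func main() {
	calls := 0
	err := pullWithRetry(func() error {
		calls++
		if calls < 3 {
			return errors.New("transient registry error")
		}
		return nil
	}, 3, 0)
	fmt.Println("result:", err)
}
```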
@@ -1225,7 +1166,7 @@ func ValidateImageVolumeMode(mode string) error { if mode == "" { return nil } - if util.StringInSlice(mode, validImageVolumeModes) { + if slices.Contains(validImageVolumeModes, mode) { return nil } @@ -1242,7 +1183,7 @@ func (c *Config) FindInitBinary() (string, error) { if c.Engine.InitPath != "" { return c.Engine.InitPath, nil } - // keep old default working to guarantee backwards comapt + // keep old default working to guarantee backwards compat if _, err := os.Stat(DefaultInitPath); err == nil { return DefaultInitPath, nil } diff --git a/vendor/github.com/containers/common/pkg/config/config_darwin.go b/vendor/github.com/containers/common/pkg/config/config_darwin.go index 1b40e2bae..9982d7995 100644 --- a/vendor/github.com/containers/common/pkg/config/config_darwin.go +++ b/vendor/github.com/containers/common/pkg/config/config_darwin.go @@ -1,9 +1,5 @@ package config -import ( - "os" -) - const ( // OverrideContainersConfig holds the default config path overridden by the root user OverrideContainersConfig = "/etc/" + _configPath @@ -14,23 +10,8 @@ const ( // DefaultSignaturePolicyPath is the default value for the // policy.json file. DefaultSignaturePolicyPath = "/etc/containers/policy.json" - - // Mount type for mounting host dir - _typeBind = "bind" ) -// podman remote clients on darwin cannot use unshare.isRootless() to determine the configuration file locations. -func customConfigFile() (string, error) { - if path, found := os.LookupEnv("CONTAINERS_CONF"); found { - return path, nil - } - return rootlessConfigPath() -} - -func ifRootlessConfigPath() (string, error) { - return rootlessConfigPath() -} - var defaultHelperBinariesDir = []string{ // Relative to the binary directory "$BINDIR/../libexec/podman", diff --git a/vendor/github.com/containers/common/pkg/config/config_freebsd.go b/vendor/github.com/containers/common/pkg/config/config_freebsd.go index 48bb0994c..5b7f55a7b 100644 --- a/vendor/github.com/containers/common/pkg/config/config_freebsd.go +++ b/vendor/github.com/containers/common/pkg/config/config_freebsd.go @@ -1,9 +1,5 @@ package config -import ( - "os" -) - const ( // OverrideContainersConfig holds the default config path overridden by the root user OverrideContainersConfig = "/usr/local/etc/" + _configPath @@ -14,23 +10,8 @@ const ( // DefaultSignaturePolicyPath is the default value for the // policy.json file. DefaultSignaturePolicyPath = "/usr/local/etc/containers/policy.json" - - // Mount type for mounting host dir - _typeBind = "nullfs" ) -// podman remote clients on freebsd cannot use unshare.isRootless() to determine the configuration file locations. -func customConfigFile() (string, error) { - if path, found := os.LookupEnv("CONTAINERS_CONF"); found { - return path, nil - } - return rootlessConfigPath() -} - -func ifRootlessConfigPath() (string, error) { - return rootlessConfigPath() -} - var defaultHelperBinariesDir = []string{ "/usr/local/bin", "/usr/local/libexec/podman", diff --git a/vendor/github.com/containers/common/pkg/config/config_linux.go b/vendor/github.com/containers/common/pkg/config/config_linux.go index f294402a3..66d193467 100644 --- a/vendor/github.com/containers/common/pkg/config/config_linux.go +++ b/vendor/github.com/containers/common/pkg/config/config_linux.go @@ -1,9 +1,6 @@ package config import ( - "os" - - "github.com/containers/storage/pkg/unshare" selinux "github.com/opencontainers/selinux/go-selinux" ) @@ -17,40 +14,12 @@ const ( // DefaultSignaturePolicyPath is the default value for the // policy.json file. 
 	DefaultSignaturePolicyPath = "/etc/containers/policy.json"
-
-	// Mount type for mounting host dir
-	_typeBind = "bind"
 )
 
 func selinuxEnabled() bool {
 	return selinux.GetEnabled()
 }
 
-func customConfigFile() (string, error) {
-	if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
-		return path, nil
-	}
-	if unshare.GetRootlessUID() > 0 {
-		path, err := rootlessConfigPath()
-		if err != nil {
-			return "", err
-		}
-		return path, nil
-	}
-	return OverrideContainersConfig, nil
-}
-
-func ifRootlessConfigPath() (string, error) {
-	if unshare.GetRootlessUID() > 0 {
-		path, err := rootlessConfigPath()
-		if err != nil {
-			return "", err
-		}
-		return path, nil
-	}
-	return "", nil
-}
-
 var defaultHelperBinariesDir = []string{
 	"/usr/local/libexec/podman",
 	"/usr/local/lib/podman",
diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go
index dae3ea0d9..e9826d62c 100644
--- a/vendor/github.com/containers/common/pkg/config/config_local.go
+++ b/vendor/github.com/containers/common/pkg/config/config_local.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package config
 
@@ -43,6 +42,14 @@ func (c *ContainersConfig) validateDevices() error {
 	return nil
 }
 
+func (c *ContainersConfig) validateInterfaceName() error {
+	if c.InterfaceName == "device" || c.InterfaceName == "" {
+		return nil
+	}
+
+	return fmt.Errorf("invalid interface_name option %s", c.InterfaceName)
+}
+
 func (c *ContainersConfig) validateUlimits() error {
 	for _, u := range c.DefaultUlimits.Get() {
 		ul, err := units.ParseUlimit(u)
@@ -67,6 +74,13 @@ func (c *ContainersConfig) validateTZ() error {
 		"/etc/zoneinfo",
 	}
 
+	// Allow using TZDIR to override the lookupPaths. Ref:
+	// https://sourceware.org/git/?p=glibc.git;a=blob;f=time/tzfile.c;h=8a923d0cccc927a106dc3e3c641be310893bab4e;hb=HEAD#l149
+	tzdir := os.Getenv("TZDIR")
+	if tzdir != "" {
+		lookupPaths = []string{tzdir}
+	}
+
 	for _, paths := range lookupPaths {
 		zonePath := filepath.Join(paths, c.TZ)
 		if _, err := os.Stat(zonePath); err == nil {
diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go
index bff869efa..f7b3d1a02 100644
--- a/vendor/github.com/containers/common/pkg/config/config_remote.go
+++ b/vendor/github.com/containers/common/pkg/config/config_remote.go
@@ -1,5 +1,4 @@
 //go:build remote
-// +build remote
 
 package config
 
@@ -21,6 +20,10 @@ func (c *ContainersConfig) validateDevices() error {
 	return nil
 }
 
+func (c *ContainersConfig) validateInterfaceName() error {
+	return nil
+}
+
 func (c *ContainersConfig) validateUlimits() error {
 	return nil
 }
diff --git a/vendor/github.com/containers/common/pkg/config/config_unix.go b/vendor/github.com/containers/common/pkg/config/config_unix.go
new file mode 100644
index 000000000..bd1652787
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/config_unix.go
@@ -0,0 +1,25 @@
+//go:build !windows
+
+package config
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/unshare"
+)
+
+// userConfigPath returns the path to the user's local config that is
+// not shared with other users. It uses $XDG_CONFIG_HOME/containers...
+// if set or $HOME/.config/containers... if not.
+func userConfigPath() (string, error) {
+	if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
+		return filepath.Join(configHome, _configPath), nil
+	}
+	home, err := unshare.HomeDir()
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Join(home, UserOverrideContainersConfig), nil
+}
diff --git a/vendor/github.com/containers/common/pkg/config/config_unsupported.go b/vendor/github.com/containers/common/pkg/config/config_unsupported.go
index 64e4fcfcd..341225f10 100644
--- a/vendor/github.com/containers/common/pkg/config/config_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/config/config_unsupported.go
@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux
 
 package config
diff --git a/vendor/github.com/containers/common/pkg/config/config_windows.go b/vendor/github.com/containers/common/pkg/config/config_windows.go
index 67f0aab23..9011687e4 100644
--- a/vendor/github.com/containers/common/pkg/config/config_windows.go
+++ b/vendor/github.com/containers/common/pkg/config/config_windows.go
@@ -17,15 +17,9 @@ const (
 	_typeBind = "bind"
 )
 
-// podman remote clients on windows cannot use unshare.isRootless() to determine the configuration file locations.
-func customConfigFile() (string, error) {
-	if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
-		return path, nil
-	}
-	return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil
-}
-
-func ifRootlessConfigPath() (string, error) {
+// userConfigPath returns the path to the user's local config that is
+// not shared with other users. It uses $APPDATA/containers...
+func userConfigPath() (string, error) {
 	return os.Getenv("APPDATA") + "\\containers\\containers.conf", nil
 }
diff --git a/vendor/github.com/containers/common/pkg/config/connections.go b/vendor/github.com/containers/common/pkg/config/connections.go
new file mode 100644
index 000000000..d7c2c7d8a
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/connections.go
@@ -0,0 +1,312 @@
+package config
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/lockfile"
+)
+
+const connectionsFile = "podman-connections.json"
+
+// connectionsConfigFile returns the path to the rw connections config file
+func connectionsConfigFile() (string, error) {
+	if path, found := os.LookupEnv("PODMAN_CONNECTIONS_CONF"); found {
+		return path, nil
+	}
+	path, err := userConfigPath()
+	if err != nil {
+		return "", err
+	}
+	// file is stored next to containers.conf
+	return filepath.Join(filepath.Dir(path), connectionsFile), nil
+}
+
+type ConnectionConfig struct {
+	Default     string                 `json:",omitempty"`
+	Connections map[string]Destination `json:",omitempty"`
+}
+
+type ConnectionsFile struct {
+	Connection ConnectionConfig `json:",omitempty"`
+	Farm       FarmConfig       `json:",omitempty"`
+}
+
+type Connection struct {
+	// Name of the connection
+	Name string
+
+	// Destination for this connection
+	Destination
+
+	// Default if this connection is the default
+	Default bool
+
+	// ReadWrite if true the connection is stored in the connections file
+	ReadWrite bool
+}
+
+type Farm struct {
+	// Name of the farm
+	Name string
+
+	// Connections
+	Connections []string
+
+	// Default if this is the default farm
+	Default bool
+
+	// ReadWrite if true the farm is stored in the connections file
+	ReadWrite bool
+}
+
+func readConnectionConf(path string) (*ConnectionsFile, error) {
+	conf := new(ConnectionsFile)
+	f, err := os.Open(path)
+	if err != nil {
+		// return empty config if file does not exist
+		if errors.Is(err, fs.ErrNotExist) {
+			return conf, nil
+		}
+
+		return nil, err
+	}
+	defer f.Close()
+
+	err = json.NewDecoder(f).Decode(conf)
+	if err != nil {
+		return nil, fmt.Errorf("parse %q: %w", path, err)
+	}
+	return conf, nil
+}
+
+func writeConnectionConf(path string, conf *ConnectionsFile) error {
+	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
+		return err
+	}
+
+	opts := &ioutils.AtomicFileWriterOptions{ExplicitCommit: true}
+	configFile, err := ioutils.NewAtomicFileWriterWithOpts(path, 0o644, opts)
+	if err != nil {
+		return err
+	}
+	defer configFile.Close()
+
+	err = json.NewEncoder(configFile).Encode(conf)
+	if err != nil {
+		return err
+	}
+
+	// If no errors commit the changes to the config file
+	return configFile.Commit()
+}
+
+// EditConnectionConfig must be used to edit the connections config.
+// The function will read and write the file automatically and the
+// callback function just needs to modify the cfg as needed.
+func EditConnectionConfig(callback func(cfg *ConnectionsFile) error) error {
+	path, err := connectionsConfigFile()
+	if err != nil {
+		return err
+	}
+
+	lockPath := path + ".lock"
+	lock, err := lockfile.GetLockFile(lockPath)
+	if err != nil {
+		return fmt.Errorf("obtain lock file: %w", err)
+	}
+	lock.Lock()
+	defer lock.Unlock()
+
+	conf, err := readConnectionConf(path)
+	if err != nil {
+		return fmt.Errorf("read connections file: %w", err)
+	}
+	if conf.Farm.List == nil {
+		conf.Farm.List = make(map[string][]string)
+	}
+
+	if err := callback(conf); err != nil {
+		return err
+	}
+
+	return writeConnectionConf(path, conf)
+}
+
+func makeConnection(name string, dst Destination, def, readWrite bool) *Connection {
+	return &Connection{
+		Name:        name,
+		Destination: dst,
+		Default:     def,
+		ReadWrite:   readWrite,
+	}
+}
+
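EditConnectionConfig above is the intended write path for the new podman-connections.json file: it serializes writers through a .lock file, hands the parsed ConnectionsFile to a callback, and commits the result with an atomic writer. A usage sketch (the "build-host" connection and its URI are made-up examples):

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// Add (or overwrite) a connection while holding the file lock; the
	// mutated ConnectionsFile is written back atomically on success.
	err := config.EditConnectionConfig(func(cfg *config.ConnectionsFile) error {
		if cfg.Connection.Connections == nil {
			cfg.Connection.Connections = map[string]config.Destination{}
		}
		cfg.Connection.Connections["build-host"] = config.Destination{
			URI: "ssh://core@example.com/run/user/1000/podman/podman.sock",
		}
		cfg.Connection.Default = "build-host"
		return nil
	})
	if err != nil {
		fmt.Println("editing connections:", err)
	}
}
```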
+// GetConnection returns the connection for the given name or, if def is set
+// to true, returns the default connection.
+func (c *Config) GetConnection(name string, def bool) (*Connection, error) {
+	path, err := connectionsConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	conConf, err := readConnectionConf(path)
+	if err != nil {
+		return nil, err
+	}
+	defaultCon := conConf.Connection.Default
+	if defaultCon == "" {
+		defaultCon = c.Engine.ActiveService
+	}
+	if def {
+		if defaultCon == "" {
+			return nil, errors.New("no default connection found")
+		}
+		name = defaultCon
+	} else {
+		def = defaultCon == name
+	}
+
+	if dst, ok := conConf.Connection.Connections[name]; ok {
+		return makeConnection(name, dst, def, true), nil
+	}
+	if dst, ok := c.Engine.ServiceDestinations[name]; ok {
+		return makeConnection(name, dst, def, false), nil
+	}
+	return nil, fmt.Errorf("connection %q not found", name)
+}
+
+// GetAllConnections returns all configured connections
+func (c *Config) GetAllConnections() ([]Connection, error) {
+	path, err := connectionsConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	conConf, err := readConnectionConf(path)
+	if err != nil {
+		return nil, err
+	}
+
+	defaultCon := conConf.Connection.Default
+	if defaultCon == "" {
+		defaultCon = c.Engine.ActiveService
+	}
+
+	connections := make([]Connection, 0, len(conConf.Connection.Connections))
+	for name, dst := range conConf.Connection.Connections {
+		def := defaultCon == name
+		connections = append(connections, *makeConnection(name, dst, def, true))
+	}
+	for name, dst := range c.Engine.ServiceDestinations {
+		if _, ok := conConf.Connection.Connections[name]; ok {
+			// connection name is overwritten by connections file
+			continue
+		}
+		def := defaultCon == name
+		connections = append(connections, *makeConnection(name, dst, def, false))
+	}
+
+	return connections, nil
+}
+
+func getConnections(cons []string, dests map[string]Destination) ([]Connection, error) {
+	connections := make([]Connection, 0, len(cons))
+	for _, name := range cons {
+		if dst, ok := dests[name]; ok {
+			connections = append(connections, *makeConnection(name, dst, false, false))
+		} else {
+			return nil, fmt.Errorf("connection %q not found", name)
+		}
+	}
+	return connections, nil
+}
+
+// GetFarmConnections returns all the connections for the given farm.
+func (c *Config) GetFarmConnections(name string) ([]Connection, error) {
+	_, cons, err := c.getFarmConnections(name, false)
+	return cons, err
+}
+
+// GetDefaultFarmConnections returns the name of the default farm
+// and the connections.
+func (c *Config) GetDefaultFarmConnections() (string, []Connection, error) {
+	return c.getFarmConnections("", true)
+}
+
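Connection lookup is layered: entries in podman-connections.json shadow the read-only service_destinations from containers.conf, and the default connection falls back to Engine.ActiveService when the JSON file names none. A sketch of resolving the default connection through the public API (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	// def=true resolves the default connection, preferring the Default field
	// of podman-connections.json and falling back to engine.active_service.
	con, err := cfg.GetConnection("", true)
	if err != nil {
		fmt.Println("no default connection:", err)
		return
	}
	fmt.Printf("%s -> %s (read-write: %v)\n", con.Name, con.URI, con.ReadWrite)
}
```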
+// getFarmConnections returns all connections for the given farm; if def is
+// true it will use the default farm instead of the name.
+// Returns the name of the farm and the connections for it.
+func (c *Config) getFarmConnections(name string, def bool) (string, []Connection, error) {
+	path, err := connectionsConfigFile()
+	if err != nil {
+		return "", nil, err
+	}
+	conConf, err := readConnectionConf(path)
+	if err != nil {
+		return "", nil, err
+	}
+	defaultFarm := conConf.Farm.Default
+	if defaultFarm == "" {
+		defaultFarm = c.Farms.Default
+	}
+	if def {
+		if defaultFarm == "" {
+			return "", nil, errors.New("no default farm found")
+		}
+		name = defaultFarm
+	}
+
+	if cons, ok := conConf.Farm.List[name]; ok {
+		cons, err := getConnections(cons, conConf.Connection.Connections)
+		return name, cons, err
+	}
+	if cons, ok := c.Farms.List[name]; ok {
+		cons, err := getConnections(cons, c.Engine.ServiceDestinations)
+		return name, cons, err
+	}
+	return "", nil, fmt.Errorf("farm %q not found", name)
+}
+
+func makeFarm(name string, cons []string, def, readWrite bool) Farm {
+	return Farm{
+		Name:        name,
+		Connections: cons,
+		Default:     def,
+		ReadWrite:   readWrite,
+	}
+}
+
+// GetAllFarms returns all configured farms
+func (c *Config) GetAllFarms() ([]Farm, error) {
+	path, err := connectionsConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	conConf, err := readConnectionConf(path)
+	if err != nil {
+		return nil, err
+	}
+	defaultFarm := conConf.Farm.Default
+	if defaultFarm == "" {
+		defaultFarm = c.Farms.Default
+	}
+
+	farms := make([]Farm, 0, len(conConf.Farm.List))
+	for name, cons := range conConf.Farm.List {
+		def := defaultFarm == name
+		farms = append(farms, makeFarm(name, cons, def, true))
+	}
+	for name, cons := range c.Farms.List {
+		if _, ok := conConf.Farm.List[name]; ok {
+			// farm name is overwritten by connections file
+			continue
+		}
+		def := defaultFarm == name
+		farms = append(farms, makeFarm(name, cons, def, false))
+	}
+
+	return farms, nil
+}
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 8c532f079..6e0044f6d 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -10,7 +10,8 @@
 # locations in the following order:
 #  1. /usr/share/containers/containers.conf
 #  2. /etc/containers/containers.conf
-#  3. $HOME/.config/containers/containers.conf (Rootless containers ONLY)
+#  3. $XDG_CONFIG_HOME/containers/containers.conf or
+#     $HOME/.config/containers/containers.conf if $XDG_CONFIG_HOME is not set
 # Items specified in the latter containers.conf, if they exist, override the
 # previous containers.conf settings, or the default settings.
@@ -164,6 +165,13 @@ default_sysctls = [
 #
 #ipcns = "shareable"
 
+# Default way to set an interface name inside a container. Defaults to the
+# legacy pattern of ethX, where X is an integer, when left undefined.
+# Options are:
+#  "device"   Uses the network_interface name from the network config as interface name.
+#             Falls back to the ethX pattern if the network_interface is not set.
+#interface_name = ""
+
 # keyring tells the container engine whether to create
 # a kernel keyring for use within the container.
 #
@@ -340,6 +348,14 @@ default_sysctls = [
 #  "/usr/lib/netavark",
 #]
 
+# The firewall driver to be used by netavark.
+# The default is empty which means netavark will pick one accordingly. Current supported
+# drivers are "iptables", "none" (no firewall rules will be created) and "firewalld" (firewalld is
+# experimental at the moment and not recommended outside of testing). In the future we are
+# planning to add support for a "nftables" driver.
+#firewall_driver = "" + + # The network name of the default network to attach pods to. # #default_network = "podman" @@ -368,9 +384,9 @@ default_sysctls = [ # Configure which rootless network program to use by default. Valid options are -# `slirp4netns` (default) and `pasta`. +# `slirp4netns` and `pasta` (default). # -#default_rootless_network_cmd = "slirp4netns" +#default_rootless_network_cmd = "pasta" # Path to the directory where network configuration files are located. # For the CNI backend the default is "/etc/cni/net.d" as root @@ -419,6 +435,9 @@ default_sysctls = [ # The compression format to use when pushing an image. # Valid options are: `gzip`, `zstd` and `zstd:chunked`. +# This field is ignored when pushing images to the docker-daemon and +# docker-archive formats. It is also ignored when the manifest format is set +# to v2s2. # #compression_format = "gzip" @@ -541,7 +560,7 @@ default_sysctls = [ #image_parallel_copies = 0 # Tells container engines how to handle the built-in image volumes. -# * bind: An anonymous named volume will be created and mounted +# * anonymous: An anonymous named volume will be created and mounted # into the container. # * tmpfs: The volume is mounted onto the container as a tmpfs, # which allows users to create content that disappears when @@ -620,7 +639,8 @@ default_sysctls = [ # #no_pivot_root = false -# Number of locks available for containers and pods. +# Number of locks available for containers, pods, and volumes. Each container, +# pod, and volume consumes 1 lock for as long as it exists. # If this is changed, a lock renumber must be performed (e.g. with the # 'podman system renumber' command). # @@ -639,6 +659,16 @@ default_sysctls = [ # #remote = false +# Number of times to retry pulling/pushing images in case of failure +# +#retry = 3 + +# Delay between retries in case pulling/pushing image fails. +# If set, container engines will retry at the set interval, +# otherwise they delay 2 seconds and then exponentially back off. +# +#retry_delay = "2s" + # Default OCI runtime # #runtime = "crun" @@ -729,6 +759,15 @@ default_sysctls = [ # "/run/current-system/sw/bin/crun", #] +#crun-vm = [ +# "/usr/bin/crun-vm", +# "/usr/local/bin/crun-vm", +# "/usr/local/sbin/crun-vm", +# "/sbin/crun-vm", +# "/bin/crun-vm", +# "/run/current-system/sw/bin/crun-vm", +#] + #kata = [ # "/usr/bin/kata-runtime", # "/usr/sbin/kata-runtime", @@ -784,16 +823,15 @@ default_sysctls = [ # #disk_size=10 -# Default image URI when creating a new VM using `podman machine init`. -# Options: On Linux/Mac, `testing`, `stable`, `next`. On Windows, the major -# version of the OS (e.g `36`) for Fedora 36. For all platforms you can -# alternatively specify a custom download URL to an image. Container engines -# translate URIs $OS and $ARCH to the native OS and ARCH. URI -# "https://example.com/$OS/$ARCH/foobar.ami" becomes +# Default Image used when creating a new VM using `podman machine init`. +# Can be specified as registry with a bootable OCI artifact, download URL, or a local path. +# Registry target must be in the form of `docker://registry/repo/image:version`. +# Container engines translate URIs $OS and $ARCH to the native OS and ARCH. +# URI "https://example.com/$OS/$ARCH/foobar.ami" would become # "https://example.com/linux/amd64/foobar.ami" on a Linux AMD machine. -# The default value is `testing`. +# If unspecified, the default Podman machine image will be used. # -#image = "testing" +#image = "" # Memory in MB a machine is created with. 
# @@ -818,6 +856,11 @@ default_sysctls = [ # #provider = "" +# Rosetta supports running x86_64 Linux binaries on a Podman machine on Apple silicon. +# The default value is `true`. Supported on AppleHV(arm64) machines only. +# +#rosetta=true + # The [machine] table MUST be the last entry in this file. # (Unless another table is added) # TOML does not provide a way to end a table other than a further table being diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index ea2ede1cb..b08f16592 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -13,7 +13,6 @@ import ( nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/cgroupv2" - "github.com/containers/common/pkg/util" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" @@ -30,7 +29,7 @@ const ( _defaultTransport = "docker://" // _defaultImageVolumeMode is a mode to handle built-in image volumes. - _defaultImageVolumeMode = _typeBind + _defaultImageVolumeMode = "anonymous" // defaultInitName is the default name of the init binary defaultInitName = "catatonit" @@ -196,7 +195,9 @@ func defaultConfig() (*Config, error) { } defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath - if useUserConfigLocations() { + // NOTE: For now we want Windows to use system locations. + // GetRootlessUID == -1 on Windows, so exclude negative range + if unshare.GetRootlessUID() > 0 { configHome, err := homedir.GetConfigHome() if err != nil { return nil, err @@ -252,10 +253,11 @@ func defaultConfig() (*Config, error) { Volumes: attributedstring.Slice{}, }, Network: NetworkConfig{ + FirewallDriver: "", DefaultNetwork: "podman", DefaultSubnet: DefaultSubnet, DefaultSubnetPools: DefaultSubnetPools, - DefaultRootlessNetworkCmd: "slirp4netns", + DefaultRootlessNetworkCmd: "pasta", DNSBindPort: 0, CNIPluginDirs: attributedstring.NewSlice(DefaultCNIPluginDirs), NetavarkPluginDirs: attributedstring.NewSlice(DefaultNetavarkPluginDirs), @@ -284,18 +286,21 @@ func defaultMachineConfig() MachineConfig { return MachineConfig{ CPUs: uint64(cpus), DiskSize: 100, - Image: getDefaultMachineImage(), - Memory: 2048, - User: getDefaultMachineUser(), - Volumes: attributedstring.NewSlice(getDefaultMachineVolumes()), + // TODO: Set machine image default here + // Currently the default is set in Podman as we need time to stabilize + // VM images and locations between different providers. + Image: "", + Memory: 2048, + User: getDefaultMachineUser(), + Volumes: attributedstring.NewSlice(getDefaultMachineVolumes()), + Rosetta: true, } } // defaultFarmConfig returns the default farms configuration. func defaultFarmConfig() FarmConfig { - emptyList := make(map[string][]string) return FarmConfig{ - List: emptyList, + List: map[string][]string{}, } } @@ -320,7 +325,7 @@ func defaultEngineConfig() (*EngineConfig, error) { return nil, err } } - storeOpts, err := types.DefaultStoreOptions(useUserConfigLocations(), unshare.GetRootlessUID()) + storeOpts, err := types.DefaultStoreOptions() if err != nil { return nil, err } @@ -338,7 +343,7 @@ func defaultEngineConfig() (*EngineConfig, error) { c.HelperBinariesDir.Set(defaultHelperBinariesDir) if additionalHelperBinariesDir != "" { - // Prioritize addtionalHelperBinariesDir over defaults. 
+ // Prioritize additionalHelperBinariesDir over defaults. c.HelperBinariesDir.Set(append([]string{additionalHelperBinariesDir}, c.HelperBinariesDir.Get()...)) } c.HooksDir.Set(DefaultHooksDirs) @@ -353,6 +358,7 @@ func defaultEngineConfig() (*EngineConfig, error) { c.PodmanshTimeout = uint(30) c.ExitCommandDelay = uint(5 * 60) c.Remote = isRemote() + c.Retry = 3 c.OCIRuntimes = map[string][]string{ "crun": { "/usr/bin/crun", @@ -363,6 +369,14 @@ func defaultEngineConfig() (*EngineConfig, error) { "/bin/crun", "/run/current-system/sw/bin/crun", }, + "crun-vm": { + "/usr/bin/crun-vm", + "/usr/local/bin/crun-vm", + "/usr/local/sbin/crun-vm", + "/sbin/crun-vm", + "/bin/crun-vm", + "/run/current-system/sw/bin/crun-vm", + }, "crun-wasm": { "/usr/bin/crun-wasm", "/usr/sbin/crun-wasm", @@ -470,7 +484,6 @@ func defaultEngineConfig() (*EngineConfig, error) { // TODO - ideally we should expose a `type LockType string` along with // constants. c.LockType = getDefaultLockType() - c.MachineEnabled = false c.ChownCopiedFiles = true c.PodExitPolicy = defaultPodExitPolicy @@ -481,11 +494,14 @@ func defaultEngineConfig() (*EngineConfig, error) { } func defaultTmpDir() (string, error) { - if !useUserConfigLocations() { + // NOTE: For now we want Windows to use system locations. + // GetRootlessUID == -1 on Windows, so exclude negative range + rootless := unshare.GetRootlessUID() > 0 + if !rootless { return getLibpodTmpDir(), nil } - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := homedir.GetRuntimeDir() if err != nil { return "", err } @@ -515,13 +531,13 @@ func (c EngineConfig) EventsLogMaxSize() uint64 { func (c *Config) SecurityOptions() []string { securityOpts := []string{} if c.Containers.SeccompProfile != "" && c.Containers.SeccompProfile != SeccompDefaultPath { - securityOpts = append(securityOpts, fmt.Sprintf("seccomp=%s", c.Containers.SeccompProfile)) + securityOpts = append(securityOpts, "seccomp="+c.Containers.SeccompProfile) } if apparmor.IsEnabled() && c.Containers.ApparmorProfile != "" { - securityOpts = append(securityOpts, fmt.Sprintf("apparmor=%s", c.Containers.ApparmorProfile)) + securityOpts = append(securityOpts, "apparmor="+c.Containers.ApparmorProfile) } if selinux.GetEnabled() && !c.Containers.EnableLabeling { - securityOpts = append(securityOpts, fmt.Sprintf("label=%s", selinux.DisableSecOpt()[0])) + securityOpts = append(securityOpts, "label="+selinux.DisableSecOpt()[0]) } return securityOpts } @@ -551,7 +567,7 @@ func (c *Config) DNSServers() []string { return c.Containers.DNSServers.Get() } -// DNSSerches returns the default DNS searches to add to resolv.conf in containers. +// DNSSearches returns the default DNS searches to add to resolv.conf in containers. func (c *Config) DNSSearches() []string { return c.Containers.DNSSearches.Get() } @@ -636,11 +652,6 @@ func (c *Config) LogDriver() string { return c.Containers.LogDriver } -// MachineEnabled returns if podman is running inside a VM or not. -func (c *Config) MachineEnabled() bool { - return c.Engine.MachineEnabled -} - // MachineVolumes returns volumes to mount into the VM. func (c *Config) MachineVolumes() ([]string, error) { return machineVolumes(c.Machine.Volumes.Get()) @@ -670,18 +681,6 @@ func getDefaultSSHConfig() string { return filepath.Join(dirname, ".ssh", "config") } -func useUserConfigLocations() bool { - // NOTE: For now we want Windows to use system locations. 
- // GetRootlessUID == -1 on Windows, so exclude negative range - return unshare.GetRootlessUID() > 0 -} - -// getDefaultImage returns the default machine image stream -// On Windows this refers to the Fedora major release number -func getDefaultMachineImage() string { - return "testing" -} - // getDefaultMachineUser returns the user to use for rootless podman // This is only for the apple, hyperv, and qemu implementations. // WSL's user will be hardcoded in podman to "user" diff --git a/vendor/github.com/containers/common/pkg/config/default_common.go b/vendor/github.com/containers/common/pkg/config/default_common.go index f65461043..2caa3f01d 100644 --- a/vendor/github.com/containers/common/pkg/config/default_common.go +++ b/vendor/github.com/containers/common/pkg/config/default_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd package config diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go index 0d427a054..46653e399 100644 --- a/vendor/github.com/containers/common/pkg/config/default_unsupported.go +++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !windows -// +build !linux,!windows package config diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go index 4f1362bd2..70627cbda 100644 --- a/vendor/github.com/containers/common/pkg/config/default_windows.go +++ b/vendor/github.com/containers/common/pkg/config/default_windows.go @@ -1,6 +1,13 @@ package config -import "os" +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/homedir" +) // isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { @@ -36,7 +43,10 @@ func getLibpodTmpDir() string { // getDefaultMachineVolumes returns default mounted volumes (possibly with env vars, which will be expanded) func getDefaultMachineVolumes() []string { - return []string{} + hd := homedir.Get() + vol := filepath.VolumeName(hd) + hostMnt := filepath.ToSlash(strings.TrimPrefix(hd, vol)) + return []string{fmt.Sprintf("%s:%s", hd, hostMnt)} } func getDefaultComposeProviders() []string { diff --git a/vendor/github.com/containers/common/pkg/config/new.go b/vendor/github.com/containers/common/pkg/config/new.go index 64ddf4716..fb59473f0 100644 --- a/vendor/github.com/containers/common/pkg/config/new.go +++ b/vendor/github.com/containers/common/pkg/config/new.go @@ -21,7 +21,6 @@ var ( ) const ( - // FIXME: update code base and tests to use the two constants below. containersConfEnv = "CONTAINERS_CONF" containersConfOverrideEnv = containersConfEnv + "_OVERRIDE" ) @@ -79,11 +78,12 @@ func newLocked(options *Options) (*Config, error) { if err != nil { return nil, fmt.Errorf("finding config on system: %w", err) } + for _, path := range configs { // Merge changes in later configs with the previous configs. // Each config file that specified fields, will override the // previous fields. 
- if err = readConfigFromFile(path, config); err != nil { + if err = readConfigFromFile(path, config, true); err != nil { return nil, fmt.Errorf("reading system config %q: %w", path, err) } logrus.Debugf("Merged system config %q", path) @@ -115,7 +115,7 @@ func newLocked(options *Options) (*Config, error) { } // readConfigFromFile reads in container config in the specified // file and then merge changes with the current default. - if err := readConfigFromFile(add, config); err != nil { + if err := readConfigFromFile(add, config, false); err != nil { return nil, fmt.Errorf("reading additional config %q: %w", add, err) } logrus.Debugf("Merged additional config %q", add) @@ -157,12 +157,8 @@ func systemConfigs() (configs []string, finalErr error) { } return append(configs, path), nil } - if _, err := os.Stat(DefaultContainersConfig); err == nil { - configs = append(configs, DefaultContainersConfig) - } - if _, err := os.Stat(OverrideContainersConfig); err == nil { - configs = append(configs, OverrideContainersConfig) - } + configs = append(configs, DefaultContainersConfig) + configs = append(configs, OverrideContainersConfig) var err error configs, err = addConfigs(OverrideContainersConfig+".d", configs) @@ -170,18 +166,14 @@ func systemConfigs() (configs []string, finalErr error) { return nil, err } - path, err := ifRootlessConfigPath() + path, err := userConfigPath() if err != nil { return nil, err } - if path != "" { - if _, err := os.Stat(path); err == nil { - configs = append(configs, path) - } - configs, err = addConfigs(path+".d", configs) - if err != nil { - return nil, err - } + configs = append(configs, path) + configs, err = addConfigs(path+".d", configs) + if err != nil { + return nil, err } return configs, nil } @@ -225,10 +217,13 @@ func addConfigs(dirPath string, configs []string) ([]string, error) { // unmarshal its content into a Config. The config param specifies the previous // default config. If the path, only specifies a few fields in the Toml file // the defaults from the config parameter will be used for all other fields. 
-func readConfigFromFile(path string, config *Config) error { +func readConfigFromFile(path string, config *Config, ignoreErrNotExist bool) error { logrus.Tracef("Reading configuration file %q", path) meta, err := toml.DecodeFile(path, config) if err != nil { + if ignoreErrNotExist && errors.Is(err, fs.ErrNotExist) { + return nil + } return fmt.Errorf("decode configuration %v: %w", path, err) } keys := meta.Undecoded() diff --git a/vendor/github.com/containers/common/pkg/config/nosystemd.go b/vendor/github.com/containers/common/pkg/config/nosystemd.go index 352fddf92..8832d8426 100644 --- a/vendor/github.com/containers/common/pkg/config/nosystemd.go +++ b/vendor/github.com/containers/common/pkg/config/nosystemd.go @@ -1,5 +1,4 @@ //go:build !systemd || !cgo -// +build !systemd !cgo package config diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go index 3cd6ff845..506374619 100644 --- a/vendor/github.com/containers/common/pkg/config/systemd.go +++ b/vendor/github.com/containers/common/pkg/config/systemd.go @@ -1,5 +1,4 @@ //go:build systemd && cgo -// +build systemd,cgo package config diff --git a/vendor/github.com/containers/common/pkg/filters/filters.go b/vendor/github.com/containers/common/pkg/filters/filters.go index 3d442a530..25f52297b 100644 --- a/vendor/github.com/containers/common/pkg/filters/filters.go +++ b/vendor/github.com/containers/common/pkg/filters/filters.go @@ -2,6 +2,7 @@ package filters import ( "encoding/json" + "errors" "fmt" "net/http" "path/filepath" @@ -17,7 +18,7 @@ import ( func ComputeUntilTimestamp(filterValues []string) (time.Time, error) { invalid := time.Time{} if len(filterValues) != 1 { - return invalid, fmt.Errorf("specify exactly one timestamp for until") + return invalid, errors.New("specify exactly one timestamp for until") } ts, err := timetype.GetTimestamp(filterValues[0], time.Now()) if err != nil { @@ -76,13 +77,10 @@ func FiltersFromRequest(r *http.Request) ([]string, error) { libpodFilters := make([]string, 0, len(filters)) for filterKey, filterSlice := range filters { - f := filterKey for _, filterValue := range filterSlice { - f += "=" + filterValue + libpodFilters = append(libpodFilters, fmt.Sprintf("%s=%s", filterKey, filterValue)) } - libpodFilters = append(libpodFilters, f) } - return libpodFilters, nil } diff --git a/vendor/github.com/containers/common/pkg/hooks/hooks.go b/vendor/github.com/containers/common/pkg/hooks/hooks.go index 2758d122d..0b4ad052b 100644 --- a/vendor/github.com/containers/common/pkg/hooks/hooks.go +++ b/vendor/github.com/containers/common/pkg/hooks/hooks.go @@ -122,10 +122,8 @@ func (m *Manager) Hooks(config *rspec.Spec, annotations map[string]string, hasBi switch stage { case "createContainer": config.Hooks.CreateContainer = append(config.Hooks.CreateContainer, namedHook.hook.Hook) - case "createRuntime": + case "createRuntime", "prestart": config.Hooks.CreateRuntime = append(config.Hooks.CreateRuntime, namedHook.hook.Hook) - case "prestart": - config.Hooks.Prestart = append(config.Hooks.Prestart, namedHook.hook.Hook) case "poststart": config.Hooks.Poststart = append(config.Hooks.Poststart, namedHook.hook.Hook) case "poststop": diff --git a/vendor/github.com/containers/common/pkg/machine/machine.go b/vendor/github.com/containers/common/pkg/machine/machine.go index 36428e58f..57797a445 100644 --- a/vendor/github.com/containers/common/pkg/machine/machine.go +++ b/vendor/github.com/containers/common/pkg/machine/machine.go @@ -4,15 +4,9 @@ 
import ( "os" "strings" "sync" - - "github.com/containers/common/pkg/config" - "github.com/sirupsen/logrus" ) -// TODO: change name to MachineMarker since package is already called machine -// -//nolint:revive -type MachineMarker struct { +type Marker struct { Enabled bool Type string } @@ -21,56 +15,42 @@ const ( markerFile = "/etc/containers/podman-machine" Wsl = "wsl" Qemu = "qemu" + AppleHV = "applehv" + HyperV = "hyperv" ) var ( - markerSync sync.Once - machineMarker *MachineMarker + markerSync sync.Once + marker *Marker ) func loadMachineMarker(file string) { var kind string - - // Support deprecated config value for compatibility - enabled := isLegacyConfigSet() + enabled := false if content, err := os.ReadFile(file); err == nil { enabled = true kind = strings.TrimSpace(string(content)) } - machineMarker = &MachineMarker{enabled, kind} -} - -func isLegacyConfigSet() bool { - config, err := config.Default() - if err != nil { - logrus.Warnf("could not obtain container configuration") - return false - } - - //nolint:staticcheck //lint:ignore SA1019 deprecated call - return config.Engine.MachineEnabled + marker = &Marker{enabled, kind} } func IsPodmanMachine() bool { return GetMachineMarker().Enabled } -// TODO: change name to HostType since package is already called machine -// -//nolint:revive -func MachineHostType() string { +func HostType() string { return GetMachineMarker().Type } func IsGvProxyBased() bool { - return IsPodmanMachine() && MachineHostType() != Wsl + return IsPodmanMachine() && HostType() != Wsl } -func GetMachineMarker() *MachineMarker { +func GetMachineMarker() *Marker { markerSync.Do(func() { loadMachineMarker(markerFile) }) - return machineMarker + return marker } diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index 8296faa82..30f099a06 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -6,10 +6,12 @@ import ( "fmt" "os" + "github.com/containers/common/internal" "github.com/containers/image/v5/manifest" digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" ) // List is a generic interface for manipulating a manifest list or an image @@ -19,6 +21,7 @@ type List interface { Remove(instanceDigest digest.Digest) error SetURLs(instanceDigest digest.Digest, urls []string) error URLs(instanceDigest digest.Digest) ([]string, error) + ClearAnnotations(instanceDigest *digest.Digest) error SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error Annotations(instanceDigest *digest.Digest) (map[string]string, error) SetOS(instanceDigest digest.Digest, os string) error @@ -33,6 +36,12 @@ type List interface { Features(instanceDigest digest.Digest) ([]string, error) SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) error OSFeatures(instanceDigest digest.Digest) ([]string, error) + SetMediaType(instanceDigest digest.Digest, mediaType string) error + MediaType(instanceDigest digest.Digest) (string, error) + SetArtifactType(instanceDigest *digest.Digest, artifactType string) error + ArtifactType(instanceDigest *digest.Digest) (string, error) + SetSubject(subject *v1.Descriptor) error + Subject() (*v1.Descriptor, error) Serialize(mimeType string) ([]byte, error) Instances() []digest.Digest OCIv1() *v1.Index @@ -96,18 +105,21 @@ 
func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, man Platform: schema2platform, }) - ociv1platform := v1.Platform{ + ociv1platform := &v1.Platform{ Architecture: architecture, OS: osName, OSVersion: osVersion, OSFeatures: osFeatures, Variant: variant, } + if ociv1platform.Architecture == "" && ociv1platform.OS == "" && ociv1platform.OSVersion == "" && ociv1platform.Variant == "" && len(ociv1platform.OSFeatures) == 0 { + ociv1platform = nil + } l.oci.Manifests = append(l.oci.Manifests, v1.Descriptor{ MediaType: manifestType, Size: manifestSize, Digest: manifestDigest, - Platform: &ociv1platform, + Platform: ociv1platform, }) return nil @@ -166,7 +178,13 @@ func (l *list) SetURLs(instanceDigest digest.Digest, urls []string) error { return err } oci.URLs = append([]string{}, urls...) + if len(oci.URLs) == 0 { + oci.URLs = nil + } docker.URLs = append([]string{}, urls...) + if len(docker.URLs) == 0 { + docker.URLs = nil + } return nil } @@ -179,7 +197,24 @@ func (l *list) URLs(instanceDigest digest.Digest) ([]string, error) { return append([]string{}, oci.URLs...), nil } -// SetAnnotations sets annotations on the image index, or on a specific manifest. +// ClearAnnotations removes all annotations from the image index, or from a +// specific manifest. +// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) ClearAnnotations(instanceDigest *digest.Digest) error { + a := &l.oci.Annotations + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return err + } + a = &oci.Annotations + } + *a = nil + return nil +} + +// SetAnnotations sets annotations on the image index, or on a specific +// manifest. // The field is specific to the OCI image index format, and is not present in Docker manifest lists. func (l *list) SetAnnotations(instanceDigest *digest.Digest, annotations map[string]string) error { a := &l.oci.Annotations @@ -190,10 +225,15 @@ func (l *list) SetAnnotations(instanceDigest *digest.Digest, annotations map[str } a = &oci.Annotations } - (*a) = make(map[string]string) + if *a == nil { + (*a) = make(map[string]string) + } for k, v := range annotations { (*a)[k] = v } + if len(*a) == 0 { + *a = nil + } return nil } @@ -226,7 +266,13 @@ func (l *list) SetOS(instanceDigest digest.Digest, os string) error { return err } docker.Platform.OS = os + if oci.Platform == nil { + oci.Platform = &v1.Platform{} + } oci.Platform.OS = os + if oci.Platform.Architecture == "" && oci.Platform.OS == "" && oci.Platform.OSVersion == "" && oci.Platform.Variant == "" && len(oci.Platform.OSFeatures) == 0 { + oci.Platform = nil + } return nil } @@ -236,7 +282,11 @@ func (l *list) OS(instanceDigest digest.Digest) (string, error) { if err != nil { return "", err } - return oci.Platform.OS, nil + platform := oci.Platform + if platform == nil { + platform = &v1.Platform{} + } + return platform.OS, nil } // SetArchitecture sets the Architecture field in the platform information associated with the instance with the specified digest. 
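A recurring edit in this file: every platform setter now allocates oci.Platform on demand and collapses it back to nil when all of its fields are empty, so an all-zero platform object never serializes. A sketch of that normalization factored into a helper (the helper and function names are mine, not the upstream code, which inlines the check):

```go
package manifestutil

import v1 "github.com/opencontainers/image-spec/specs-go/v1"

// normalizePlatform returns nil when every platform field is unset, so
// JSON output omits the "platform" key instead of emitting an empty
// object; otherwise the platform is passed through unchanged.
func normalizePlatform(p *v1.Platform) *v1.Platform {
	if p == nil {
		return nil
	}
	if p.Architecture == "" && p.OS == "" && p.OSVersion == "" &&
		p.Variant == "" && len(p.OSFeatures) == 0 {
		return nil
	}
	return p
}

// setOS shows the allocate-mutate-normalize shape each setter follows.
func setOS(d *v1.Descriptor, os string) {
	if d.Platform == nil {
		d.Platform = &v1.Platform{}
	}
	d.Platform.OS = os
	d.Platform = normalizePlatform(d.Platform)
}
```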
@@ -250,7 +300,13 @@ func (l *list) SetArchitecture(instanceDigest digest.Digest, arch string) error return err } docker.Platform.Architecture = arch + if oci.Platform == nil { + oci.Platform = &v1.Platform{} + } oci.Platform.Architecture = arch + if oci.Platform.Architecture == "" && oci.Platform.OS == "" && oci.Platform.OSVersion == "" && oci.Platform.Variant == "" && len(oci.Platform.OSFeatures) == 0 { + oci.Platform = nil + } return nil } @@ -260,7 +316,11 @@ func (l *list) Architecture(instanceDigest digest.Digest) (string, error) { if err != nil { return "", err } - return oci.Platform.Architecture, nil + platform := oci.Platform + if platform == nil { + platform = &v1.Platform{} + } + return platform.Architecture, nil } // SetOSVersion sets the OSVersion field in the platform information associated with the instance with the specified digest. @@ -274,7 +334,13 @@ func (l *list) SetOSVersion(instanceDigest digest.Digest, osVersion string) erro return err } docker.Platform.OSVersion = osVersion + if oci.Platform == nil { + oci.Platform = &v1.Platform{} + } oci.Platform.OSVersion = osVersion + if oci.Platform.Architecture == "" && oci.Platform.OS == "" && oci.Platform.OSVersion == "" && oci.Platform.Variant == "" && len(oci.Platform.OSFeatures) == 0 { + oci.Platform = nil + } return nil } @@ -284,7 +350,11 @@ func (l *list) OSVersion(instanceDigest digest.Digest) (string, error) { if err != nil { return "", err } - return oci.Platform.OSVersion, nil + platform := oci.Platform + if platform == nil { + platform = &v1.Platform{} + } + return platform.OSVersion, nil } // SetVariant sets the Variant field in the platform information associated with the instance with the specified digest. @@ -298,7 +368,13 @@ func (l *list) SetVariant(instanceDigest digest.Digest, variant string) error { return err } docker.Platform.Variant = variant + if oci.Platform == nil { + oci.Platform = &v1.Platform{} + } oci.Platform.Variant = variant + if oci.Platform.Architecture == "" && oci.Platform.OS == "" && oci.Platform.OSVersion == "" && oci.Platform.Variant == "" && len(oci.Platform.OSFeatures) == 0 { + oci.Platform = nil + } return nil } @@ -308,7 +384,11 @@ func (l *list) Variant(instanceDigest digest.Digest) (string, error) { if err != nil { return "", err } - return oci.Platform.Variant, nil + platform := oci.Platform + if platform == nil { + platform = &v1.Platform{} + } + return platform.Variant, nil } // SetFeatures sets the features list in the platform information associated with the instance with the specified digest. @@ -319,6 +399,9 @@ func (l *list) SetFeatures(instanceDigest digest.Digest, features []string) erro return err } docker.Platform.Features = append([]string{}, features...) + if len(docker.Platform.Features) == 0 { + docker.Platform.Features = nil + } // no OCI equivalent return nil } @@ -344,7 +427,16 @@ func (l *list) SetOSFeatures(instanceDigest digest.Digest, osFeatures []string) return err } docker.Platform.OSFeatures = append([]string{}, osFeatures...) + if oci.Platform == nil { + oci.Platform = &v1.Platform{} + } oci.Platform.OSFeatures = append([]string{}, osFeatures...) 
+ if len(oci.Platform.OSFeatures) == 0 { + oci.Platform.OSFeatures = nil + } + if oci.Platform.Architecture == "" && oci.Platform.OS == "" && oci.Platform.OSVersion == "" && oci.Platform.Variant == "" && len(oci.Platform.OSFeatures) == 0 { + oci.Platform = nil + } return nil } @@ -354,7 +446,77 @@ func (l *list) OSFeatures(instanceDigest digest.Digest) ([]string, error) { if err != nil { return nil, err } - return append([]string{}, oci.Platform.OSFeatures...), nil + platform := oci.Platform + if platform == nil { + platform = &v1.Platform{} + } + return append([]string{}, platform.OSFeatures...), nil +} + +// SetMediaType sets the MediaType field in the instance with the specified digest. +func (l *list) SetMediaType(instanceDigest digest.Digest, mediaType string) error { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return err + } + oci.MediaType = mediaType + return nil +} + +// MediaType retrieves the MediaType field in the instance with the specified digest. +func (l *list) MediaType(instanceDigest digest.Digest) (string, error) { + oci, err := l.findOCIv1(instanceDigest) + if err != nil { + return "", err + } + return oci.MediaType, nil +} + +// SetArtifactType sets the ArtifactType field in the instance with the specified digest. +func (l *list) SetArtifactType(instanceDigest *digest.Digest, artifactType string) error { + artifactTypePtr := &l.oci.ArtifactType + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return err + } + artifactTypePtr = &oci.ArtifactType + } + *artifactTypePtr = artifactType + return nil +} + +// ArtifactType retrieves the ArtifactType field in the instance with the specified digest. +func (l *list) ArtifactType(instanceDigest *digest.Digest) (string, error) { + artifactTypePtr := &l.oci.ArtifactType + if instanceDigest != nil { + oci, err := l.findOCIv1(*instanceDigest) + if err != nil { + return "", err + } + artifactTypePtr = &oci.ArtifactType + } + return *artifactTypePtr, nil +} + +// SetSubject sets the image index's subject. +// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) SetSubject(subject *v1.Descriptor) error { + if subject != nil { + subject = internal.DeepCopyDescriptor(subject) + } + l.oci.Subject = subject + return nil +} + +// Subject retrieves the subject which might have been set on the image index. +// The field is specific to the OCI image index format, and is not present in Docker manifest lists. +func (l *list) Subject() (*v1.Descriptor, error) { + s := l.oci.Subject + if s != nil { + s = internal.DeepCopyDescriptor(s) + } + return s, nil } // FromBlob builds a list from an encoded manifest list or image index. @@ -400,11 +562,19 @@ func FromBlob(manifestBytes []byte) (List, error) { if platform == nil { platform = &v1.Platform{} } + if m.Platform != nil && m.Platform.OSFeatures != nil { + platform.OSFeatures = slices.Clone(m.Platform.OSFeatures) + } + var urls []string + if m.URLs != nil { + urls = slices.Clone(m.URLs) + } list.docker.Manifests = append(list.docker.Manifests, manifest.Schema2ManifestDescriptor{ Schema2Descriptor: manifest.Schema2Descriptor{ MediaType: m.MediaType, Size: m.Size, Digest: m.Digest, + URLs: urls, }, Platform: manifest.Schema2PlatformSpec{ Architecture: platform.Architecture, @@ -421,13 +591,25 @@ func FromBlob(manifestBytes []byte) (List, error) { func (l *list) preferOCI() bool { // If we have any data that's only in the OCI format, use that. 
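The preferOCI hunk that follows widens the "OCI-only data" test: index- or instance-level artifact types, a subject descriptor, annotations, or inline Data now force OCI serialization, while plain URLs no longer do (FromBlob round-trips URLs into the Docker list as well). A trimmed sketch of that decision, assuming the list wraps a v1.Index; the function name is mine:

```go
package manifestutil

import v1 "github.com/opencontainers/image-spec/specs-go/v1"

// mustUseOCI reports whether the index carries data that the Docker
// schema2 manifest-list format cannot express, forcing OCI output.
func mustUseOCI(idx *v1.Index) bool {
	// Index-level OCI-only fields.
	if idx.ArtifactType != "" || idx.Subject != nil || len(idx.Annotations) > 0 {
		return true
	}
	for _, m := range idx.Manifests {
		// Per-instance OCI-only fields: artifact type, annotations,
		// or inline manifest content in Data. Plain URLs no longer
		// count, since they now round-trip into the Docker list too.
		if m.ArtifactType != "" || len(m.Annotations) > 0 || len(m.Data) > 0 {
			return true
		}
	}
	return false
}
```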
+ if l.oci.ArtifactType != "" { + return true + } + if l.oci.Subject != nil { + return true + } + if len(l.oci.Annotations) > 0 { + return true + } for _, m := range l.oci.Manifests { - if len(m.URLs) > 0 { + if m.ArtifactType != "" { return true } if len(m.Annotations) > 0 { return true } + if len(m.Data) > 0 { + return true + } } // If we have any data that's only in the Docker format, use that. for _, m := range l.docker.Manifests { diff --git a/vendor/github.com/containers/common/pkg/netns/netns_linux.go b/vendor/github.com/containers/common/pkg/netns/netns_linux.go index f2569d379..593665329 100644 --- a/vendor/github.com/containers/common/pkg/netns/netns_linux.go +++ b/vendor/github.com/containers/common/pkg/netns/netns_linux.go @@ -30,17 +30,19 @@ import ( "sync" "github.com/containernetworking/plugins/pkg/ns" - "github.com/containers/common/pkg/util" + "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) +// threadNsPath is the /proc path to the current netns handle for the current thread +const threadNsPath = "/proc/thread-self/ns/net" + // GetNSRunDir returns the dir of where to create the netNS. When running // rootless, it needs to be at a location writable by user. func GetNSRunDir() (string, error) { if unshare.IsRootless() { - rootlessDir, err := util.GetRuntimeDir() + rootlessDir, err := homedir.GetRuntimeDir() if err != nil { return "", err } @@ -49,6 +51,10 @@ func GetNSRunDir() (string, error) { return "/run/netns", nil } +func NewNSAtPath(nsPath string) (ns.NetNS, error) { + return newNSPath(nsPath) +} + // NewNS creates a new persistent (bind-mounted) network namespace and returns // an object representing that namespace, without switching to it. func NewNS() (ns.NetNS, error) { @@ -111,8 +117,12 @@ func NewNSWithName(name string) (ns.NetNS, error) { } } - // create an empty file at the mount point nsPath := path.Join(nsRunDir, name) + return newNSPath(nsPath) +} + +func newNSPath(nsPath string) (ns.NetNS, error) { + // create an empty file at the mount point mountPointFd, err := os.OpenFile(nsPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) if err != nil { return nil, err @@ -140,24 +150,10 @@ func NewNSWithName(name string) (ns.NetNS, error) { // Don't unlock. By not unlocking, golang will kill the OS thread when the // goroutine is done (for go1.10+) - threadNsPath := getCurrentThreadNetNSPath() - - var origNS ns.NetNS - origNS, err = ns.GetNS(threadNsPath) - if err != nil { - logrus.Warnf("Cannot open current network namespace %s: %q", threadNsPath, err) - return - } - defer func() { - if err := origNS.Close(); err != nil { - logrus.Errorf("Unable to close namespace: %q", err) - } - }() - // create a new netns on the current thread err = unix.Unshare(unix.CLONE_NEWNET) if err != nil { - logrus.Warnf("Cannot create a new network namespace: %q", err) + err = fmt.Errorf("unshare network namespace: %w", err) return } @@ -181,29 +177,26 @@ func NewNSWithName(name string) (ns.NetNS, error) { // UnmountNS unmounts the given netns path func UnmountNS(nsPath string) error { - nsRunDir, err := GetNSRunDir() - if err != nil { - return err - } - + var rErr error // Only unmount if it's been bind-mounted (don't touch namespaces in /proc...) 
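The rewritten netns creation path above drops the save-and-restore of the original namespace: the worker goroutine locks its OS thread, unshares, bind-mounts the new threadNsPath, and then simply lets the runtime discard the thread. A compressed sketch of that pattern (function name is mine; error-path cleanup is omitted):

```go
//go:build linux

package netnsutil

import (
	"fmt"
	"os"
	"runtime"

	"golang.org/x/sys/unix"
)

// createPersistentNetns creates a network namespace and bind-mounts it
// onto nsPath so it outlives the creating process.
func createPersistentNetns(nsPath string) error {
	// Create the empty file that will serve as the mount point.
	f, err := os.OpenFile(nsPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600)
	if err != nil {
		return err
	}
	f.Close()

	errCh := make(chan error)
	go func() {
		// Lock the goroutine to its OS thread and never unlock: the Go
		// runtime discards the thread (with its unshared netns) when
		// the goroutine exits, so no restore step is needed.
		runtime.LockOSThread()
		if err := unix.Unshare(unix.CLONE_NEWNET); err != nil {
			errCh <- fmt.Errorf("unshare network namespace: %w", err)
			return
		}
		// /proc/thread-self/ns/net names this thread's netns; the bind
		// mount keeps the namespace alive after the thread is gone.
		errCh <- unix.Mount("/proc/thread-self/ns/net", nsPath, "none", unix.MS_BIND, "")
	}()
	return <-errCh
}
```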
- if strings.HasPrefix(nsPath, nsRunDir) { + if !strings.HasPrefix(nsPath, "/proc/") { if err := unix.Unmount(nsPath, unix.MNT_DETACH); err != nil { - return fmt.Errorf("failed to unmount NS: at %s: %v", nsPath, err) + // Do not return here, always try to remove below. + // This is important in case podman now is in a new userns compared to + // when the netns was created. The umount will fail EINVAL but removing + // the file will work and the kernel will destroy the bind mount in the + // other ns because of this. We also need it so pasta doesn't leak. + rErr = fmt.Errorf("failed to unmount NS: at %s: %w", nsPath, err) } if err := os.Remove(nsPath); err != nil { - return fmt.Errorf("failed to remove ns path %s: %v", nsPath, err) + err := fmt.Errorf("failed to remove ns path: %w", err) + if rErr != nil { + err = fmt.Errorf("%v, %w", err, rErr) + } + rErr = err } } - return nil -} - -// getCurrentThreadNetNSPath copied from pkg/ns -func getCurrentThreadNetNSPath() string { - // /proc/self/ns/net returns the namespace of the main thread, not - // of whatever thread this goroutine is running on. Make sure we - // use the thread's net namespace since the thread is switching around - return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) + return rErr } diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go index 7629f5842..284751e52 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse.go +++ b/vendor/github.com/containers/common/pkg/parse/parse.go @@ -14,7 +14,7 @@ import ( // ValidateVolumeOpts validates a volume's options func ValidateVolumeOpts(options []string) ([]string, error) { - var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir, foundCopy int + var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir, foundCopy, foundCopySymlink int finalOpts := make([]string, 0, len(options)) for _, opt := range options { // support advanced options like upperdir=/path, workdir=/path @@ -93,6 +93,11 @@ func ValidateVolumeOpts(options []string) ([]string, error) { if foundCopy > 1 { return nil, fmt.Errorf("invalid options %q, can only specify 1 'copy' or 'nocopy' option", strings.Join(options, ", ")) } + case "no-dereference": + foundCopySymlink++ + if foundCopySymlink > 1 { + return nil, fmt.Errorf("invalid options %q, can only specify 1 'no-dereference' option", strings.Join(options, ", ")) + } default: return nil, fmt.Errorf("invalid option type %q", opt) } diff --git a/vendor/github.com/containers/common/pkg/parse/parse_unix.go b/vendor/github.com/containers/common/pkg/parse/parse_unix.go index 44fe33d9e..86563b3eb 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse_unix.go +++ b/vendor/github.com/containers/common/pkg/parse/parse_unix.go @@ -1,5 +1,4 @@ //go:build linux || darwin || freebsd -// +build linux darwin freebsd package parse diff --git a/vendor/github.com/containers/common/pkg/password/password_supported.go b/vendor/github.com/containers/common/pkg/password/password_supported.go index 56e95b3d5..4761b3ff5 100644 --- a/vendor/github.com/containers/common/pkg/password/password_supported.go +++ b/vendor/github.com/containers/common/pkg/password/password_supported.go @@ -1,5 +1,4 @@ //go:build linux || darwin || freebsd -// +build linux darwin freebsd package password diff --git 
a/vendor/github.com/containers/common/pkg/password/password_windows.go b/vendor/github.com/containers/common/pkg/password/password_windows.go index 7a0822d01..0a1d9eebd 100644 --- a/vendor/github.com/containers/common/pkg/password/password_windows.go +++ b/vendor/github.com/containers/common/pkg/password/password_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package password diff --git a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go index 901e28a5d..008fe6bcd 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go +++ b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package retry diff --git a/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go b/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go index 7ea018a23..dc4c5bf1a 100644 --- a/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go +++ b/vendor/github.com/containers/common/pkg/rootlessport/rootlessport_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux // Rootlessport Config type for use in podman/cmd/rootlessport. package rootlessport diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go index 0db77879c..2a59c1496 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go @@ -142,6 +142,7 @@ func DefaultProfile() *Seccomp { "fchdir", "fchmod", "fchmodat", + "fchmodat2", "fchown", "fchown32", "fchownat", diff --git a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go index 87ac2ab77..616cae336 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go +++ b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go @@ -1,5 +1,4 @@ //go:build linux && seccomp -// +build linux,seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/seccomp/filter.go b/vendor/github.com/containers/common/pkg/seccomp/filter.go index 72c95734b..1739dcb93 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/filter.go +++ b/vendor/github.com/containers/common/pkg/seccomp/filter.go @@ -1,5 +1,4 @@ //go:build seccomp -// +build seccomp // NOTE: this package has originally been copied from // github.com/opencontainers/runc and modified to work for other use cases diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json index 18674db4d..306e90148 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json @@ -149,6 +149,7 @@ "fchdir", "fchmod", "fchmodat", + "fchmodat2", "fchown", "fchown32", "fchownat", diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go index f7adde8ab..e399f6b28 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_linux.go @@ -1,5 +1,4 @@ //go:build seccomp -// +build seccomp // SPDX-License-Identifier: Apache-2.0 diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go 
b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go index da5230c56..1bf8155dd 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go +++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux || !seccomp -// +build !linux !seccomp // SPDX-License-Identifier: Apache-2.0 @@ -15,12 +14,12 @@ import ( var errNotSupported = errors.New("seccomp not enabled in this build") -// LoadProfile returns an error on unsuppored systems +// LoadProfile returns an error on unsupported systems func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) { return nil, errNotSupported } -// GetDefaultProfile returns an error on unsuppored systems +// GetDefaultProfile returns an error on unsupported systems func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) { return nil, errNotSupported } diff --git a/vendor/github.com/containers/common/pkg/seccomp/supported.go b/vendor/github.com/containers/common/pkg/seccomp/supported.go index f8a20e536..5c39979d5 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/supported.go +++ b/vendor/github.com/containers/common/pkg/seccomp/supported.go @@ -1,5 +1,4 @@ //go:build linux && seccomp -// +build linux,seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/seccomp/validate.go b/vendor/github.com/containers/common/pkg/seccomp/validate.go index 80558c1f0..81effc767 100644 --- a/vendor/github.com/containers/common/pkg/seccomp/validate.go +++ b/vendor/github.com/containers/common/pkg/seccomp/validate.go @@ -1,5 +1,4 @@ //go:build seccomp -// +build seccomp package seccomp diff --git a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go index 3054a2bb5..33bb62dc4 100644 --- a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go @@ -10,6 +10,7 @@ import ( "sort" "github.com/containers/storage/pkg/lockfile" + "golang.org/x/exp/maps" ) // secretsDataFile is the file where secrets data/payload will be stored @@ -56,10 +57,7 @@ func (d *Driver) List() ([]string, error) { if err != nil { return nil, err } - allID := make([]string, 0, len(secretData)) - for k := range secretData { - allID = append(allID, k) - } + allID := maps.Keys(secretData) sort.Strings(allID) return allID, err } @@ -79,7 +77,7 @@ func (d *Driver) Lookup(id string) ([]byte, error) { return nil, fmt.Errorf("%s: %w", id, errNoSecretData) } -// Store stores the bytes associated with an ID. An error is returned if the ID arleady exists +// Store stores the bytes associated with an ID. 
An error is returned if the ID already exists func (d *Driver) Store(id string, data []byte) error { d.lockfile.Lock() defer d.lockfile.Unlock() diff --git a/vendor/github.com/containers/common/pkg/secrets/secrets.go b/vendor/github.com/containers/common/pkg/secrets/secrets.go index 47e688406..7092c8f2b 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secrets.go +++ b/vendor/github.com/containers/common/pkg/secrets/secrets.go @@ -13,6 +13,7 @@ import ( "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/regexp" "github.com/containers/storage/pkg/stringid" + "golang.org/x/exp/maps" ) // maxSecretSize is the max size for secret data - 512kB @@ -289,11 +290,7 @@ func (s *SecretsManager) List() ([]Secret, error) { if err != nil { return nil, err } - ls := make([]Secret, 0, len(secrets)) - for _, v := range secrets { - ls = append(ls, v) - } - return ls, nil + return maps.Values(secrets), nil } // LookupSecretData returns secret metadata as well as secret data in bytes. diff --git a/vendor/github.com/containers/common/pkg/servicereaper/service.go b/vendor/github.com/containers/common/pkg/servicereaper/service.go index 11482c59c..12a29669b 100644 --- a/vendor/github.com/containers/common/pkg/servicereaper/service.go +++ b/vendor/github.com/containers/common/pkg/servicereaper/service.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package servicereaper diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux.go b/vendor/github.com/containers/common/pkg/signal/signal_linux.go index 21e09c9fe..6800b7d3c 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_linux.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux.go @@ -1,5 +1,4 @@ //go:build linux && !mips && !mipsle && !mips64 && !mips64le -// +build linux,!mips,!mipsle,!mips64,!mips64le // Signal handling for Linux only. package signal diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go index 52b07aaf4..58b353955 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go @@ -1,6 +1,4 @@ //go:build linux && (mips || mipsle || mips64 || mips64le) -// +build linux -// +build mips mipsle mips64 mips64le // Special signal handling for mips architecture package signal diff --git a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go index 0e8685a7c..36893d059 100644 --- a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go +++ b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux // Signal handling for Linux only. 
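Aside from the build-tag modernization, the filedriver and secrets hunks above replace hand-written collect-the-keys and collect-the-values loops with the generic helpers from golang.org/x/exp/maps. A tiny self-contained illustration:

```go
package main

import (
	"fmt"
	"sort"

	"golang.org/x/exp/maps"
)

func main() {
	secrets := map[string]string{"db-pass": "s1", "api-key": "s2"}

	// maps.Keys replaces the hand-written collect-the-keys loop. Its
	// order is unspecified, so callers needing determinism still sort.
	ids := maps.Keys(secrets)
	sort.Strings(ids)
	fmt.Println(ids) // [api-key db-pass]

	// maps.Values likewise replaces the collect-the-values loop.
	fmt.Println(len(maps.Values(secrets))) // 2
}
```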
package signal diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go index 1abb5802c..ad47f63c5 100644 --- a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go +++ b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go @@ -23,6 +23,7 @@ import ( "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "golang.org/x/crypto/ssh/knownhosts" + "golang.org/x/exp/maps" ) func golangConnectionCreate(options ConnectionCreateOptions) error { @@ -52,32 +53,32 @@ func golangConnectionCreate(options ConnectionCreateOptions) error { dst.URI += uri.Path } - cfg, err := config.ReadCustomConfig() - if err != nil { - return err - } - if cfg.Engine.ServiceDestinations == nil { - cfg.Engine.ServiceDestinations = map[string]config.Destination{ - options.Name: *dst, - } - cfg.Engine.ActiveService = options.Name - } else { - cfg.Engine.ServiceDestinations[options.Name] = *dst - } - - // Create or update an existing farm with the connection being added - if options.Farm != "" { - if len(cfg.Farms.List) == 0 { - cfg.Farms.Default = options.Farm - } - if val, ok := cfg.Farms.List[options.Farm]; ok { - cfg.Farms.List[options.Farm] = append(val, options.Name) + // TODO this really should not live here, it must be in podman where we write the other connections as well. + // This duplicates the code for no reason and I have a really hard time to make any sense of why this code + // was added in the first place. + return config.EditConnectionConfig(func(cfg *config.ConnectionsFile) error { + if cfg.Connection.Connections == nil { + cfg.Connection.Connections = map[string]config.Destination{ + options.Name: *dst, + } + cfg.Connection.Default = options.Name } else { - cfg.Farms.List[options.Farm] = []string{options.Name} + cfg.Connection.Connections[options.Name] = *dst } - } - return cfg.Write() + // Create or update an existing farm with the connection being added + if options.Farm != "" { + if len(cfg.Farm.List) == 0 { + cfg.Farm.Default = options.Farm + } + if val, ok := cfg.Farm.List[options.Farm]; ok { + cfg.Farm.List[options.Farm] = append(val, options.Name) + } else { + cfg.Farm.List[options.Farm] = []string{options.Name} + } + } + return nil + }) } func golangConnectionDial(options ConnectionDialOptions) (*ConnectionDialReport, error) { @@ -98,7 +99,7 @@ func golangConnectionDial(options ConnectionDialOptions) (*ConnectionDialReport, return &ConnectionDialReport{dial}, nil } -func golangConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, error) { +func golangConnectionExec(options ConnectionExecOptions, input io.Reader) (*ConnectionExecReport, error) { if !strings.HasPrefix(options.Host, "ssh://") { options.Host = "ssh://" + options.Host } @@ -116,7 +117,7 @@ func golangConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, return nil, fmt.Errorf("failed to connect: %w", err) } - out, err := ExecRemoteCommand(dialAdd, strings.Join(options.Args, " ")) + out, err := ExecRemoteCommandWithInput(dialAdd, strings.Join(options.Args, " "), input) if err != nil { return nil, err } @@ -188,6 +189,10 @@ func golangConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, er // ExecRemoteCommand takes a ssh client connection and a command to run and executes the // command on the specified client. 
The function returns the Stdout from the client or the Stderr func ExecRemoteCommand(dial *ssh.Client, run string) ([]byte, error) { + return ExecRemoteCommandWithInput(dial, run, nil) +} + +func ExecRemoteCommandWithInput(dial *ssh.Client, run string, input io.Reader) ([]byte, error) { sess, err := dial.NewSession() // new ssh client session if err != nil { return nil, err @@ -196,8 +201,11 @@ func ExecRemoteCommand(dial *ssh.Client, run string) ([]byte, error) { var buffer bytes.Buffer var bufferErr bytes.Buffer - sess.Stdout = &buffer // output from client funneled into buffer - sess.Stderr = &bufferErr // err form client funneled into buffer + sess.Stdout = &buffer // output from client funneled into buffer + sess.Stderr = &bufferErr // err from client funneled into buffer + if input != nil { + sess.Stdin = input + } if err := sess.Run(run); err != nil { // run the command on the ssh client return nil, fmt.Errorf("%v: %w", bufferErr.String(), err) } @@ -262,7 +270,7 @@ func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection } } } - var authMethods []ssh.AuthMethod // now we validate and check for the authorization methods, most notaibly public key authorization + var authMethods []ssh.AuthMethod // now we validate and check for the authorization methods, most notably public key authorization if len(signers) > 0 { dedup := make(map[string]ssh.Signer) for _, s := range signers { @@ -273,10 +281,7 @@ func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection dedup[fp] = s } - var uniq []ssh.Signer - for _, s := range dedup { - uniq = append(uniq, s) - } + uniq := maps.Values(dedup) authMethods = append(authMethods, ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { return uniq, nil })) @@ -286,7 +291,7 @@ func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection } if len(authMethods) == 0 { authMethods = append(authMethods, ssh.PasswordCallback(func() (string, error) { - pass, err := ReadPassword(fmt.Sprintf("%s's login password:", uri.User.Username())) + pass, err := ReadPassword(uri.User.Username() + "'s login password:") return string(pass), err })) } diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_native.go b/vendor/github.com/containers/common/pkg/ssh/connection_native.go index c725cb27d..a92693ff8 100644 --- a/vendor/github.com/containers/common/pkg/ssh/connection_native.go +++ b/vendor/github.com/containers/common/pkg/ssh/connection_native.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io" "os/exec" "regexp" "strings" @@ -34,7 +35,7 @@ func nativeConnectionCreate(options ConnectionCreateOptions) error { // test connection ssh, err := exec.LookPath("ssh") if err != nil { - return fmt.Errorf("no ssh binary found") + return err } if strings.Contains(uri.Host, "/run") { @@ -72,27 +73,35 @@ func nativeConnectionCreate(options ConnectionCreateOptions) error { return fmt.Errorf("remote podman %q failed to report its UDS socket", uri.Host) } - cfg, err := config.ReadCustomConfig() - if err != nil { - return err - } - if options.Default { - cfg.Engine.ActiveService = options.Name - } - - if cfg.Engine.ServiceDestinations == nil { - cfg.Engine.ServiceDestinations = map[string]config.Destination{ - options.Name: *dst, + // TODO this really should not live here, it must be in podman where we write the other connections as well. + // This duplicates the code for no reason and I have a really hard time to make any sense of why this code + // was added in the first place. 
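Both connection backends now thread an optional io.Reader through to the remote command's stdin. A minimal sketch of the golang-backend half of that change, assuming only the x/crypto/ssh API (the helper name is mine):

```go
package sshutil

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/ssh"
)

// runWithInput mirrors the ExecRemoteCommandWithInput shape: stdout and
// stderr are captured in buffers, and a non-nil reader becomes the
// remote command's stdin (e.g. to pipe data to `cat -` on the far side).
func runWithInput(client *ssh.Client, cmd string, input io.Reader) ([]byte, error) {
	sess, err := client.NewSession()
	if err != nil {
		return nil, err
	}
	defer sess.Close()

	var out, stderr bytes.Buffer
	sess.Stdout = &out
	sess.Stderr = &stderr
	if input != nil {
		sess.Stdin = input // nil keeps the old no-stdin behavior
	}
	if err := sess.Run(cmd); err != nil {
		return nil, fmt.Errorf("%v: %w", stderr.String(), err)
	}
	return out.Bytes(), nil
}
```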
+ return config.EditConnectionConfig(func(cfg *config.ConnectionsFile) error { + if cfg.Connection.Connections == nil { + cfg.Connection.Connections = map[string]config.Destination{ + options.Name: *dst, + } + cfg.Connection.Default = options.Name + } else { + cfg.Connection.Connections[options.Name] = *dst } - cfg.Engine.ActiveService = options.Name - } else { - cfg.Engine.ServiceDestinations[options.Name] = *dst - } - return cfg.Write() + // Create or update an existing farm with the connection being added + if options.Farm != "" { + if len(cfg.Farm.List) == 0 { + cfg.Farm.Default = options.Farm + } + if val, ok := cfg.Farm.List[options.Farm]; ok { + cfg.Farm.List[options.Farm] = append(val, options.Name) + } else { + cfg.Farm.List[options.Farm] = []string{options.Name} + } + } + return nil + }) } -func nativeConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, error) { +func nativeConnectionExec(options ConnectionExecOptions, input io.Reader) (*ConnectionExecReport, error) { dst, uri, err := Validate(options.User, options.Host, options.Port, options.Identity) if err != nil { return nil, err @@ -100,7 +109,7 @@ func nativeConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, ssh, err := exec.LookPath("ssh") if err != nil { - return nil, fmt.Errorf("no ssh binary found") + return nil, err } output := &bytes.Buffer{} @@ -126,6 +135,9 @@ func nativeConnectionExec(options ConnectionExecOptions) (*ConnectionExecReport, info := exec.Command(ssh, args...) info.Stdout = output info.Stderr = errors + if input != nil { + info.Stdin = input + } err = info.Run() if err != nil { return nil, err @@ -145,7 +157,7 @@ func nativeConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, er scp, err := exec.LookPath("scp") if err != nil { - return nil, fmt.Errorf("no scp binary found") + return nil, err } conf, err := config.Default() diff --git a/vendor/github.com/containers/common/pkg/ssh/ssh.go b/vendor/github.com/containers/common/pkg/ssh/ssh.go index d638d69ad..6e8a923ee 100644 --- a/vendor/github.com/containers/common/pkg/ssh/ssh.go +++ b/vendor/github.com/containers/common/pkg/ssh/ssh.go @@ -1,7 +1,8 @@ package ssh import ( - "fmt" + "errors" + "io" "golang.org/x/crypto/ssh" ) @@ -17,7 +18,7 @@ func Dial(options *ConnectionDialOptions, kind EngineMode) (*ssh.Client, error) var rep *ConnectionDialReport var err error if kind == NativeMode { - return nil, fmt.Errorf("ssh dial failed: you cannot create a dial-able client with native ssh") + return nil, errors.New("ssh dial failed: you cannot create a dial-able client with native ssh") } rep, err = golangConnectionDial(*options) if err != nil { @@ -27,15 +28,19 @@ func Dial(options *ConnectionDialOptions, kind EngineMode) (*ssh.Client, error) } func Exec(options *ConnectionExecOptions, kind EngineMode) (string, error) { + return ExecWithInput(options, kind, nil) +} + +func ExecWithInput(options *ConnectionExecOptions, kind EngineMode, input io.Reader) (string, error) { var rep *ConnectionExecReport var err error if kind == NativeMode { - rep, err = nativeConnectionExec(*options) + rep, err = nativeConnectionExec(*options, input) if err != nil { return "", err } } else { - rep, err = golangConnectionExec(*options) + rep, err = golangConnectionExec(*options, input) if err != nil { return "", err } diff --git a/vendor/github.com/containers/common/pkg/ssh/types.go b/vendor/github.com/containers/common/pkg/ssh/types.go index 600655931..bc41d78bf 100644 --- a/vendor/github.com/containers/common/pkg/ssh/types.go +++ 
b/vendor/github.com/containers/common/pkg/ssh/types.go @@ -70,11 +70,11 @@ type ConnectionScpReport struct { // Info is the overall struct that describes the host system // running libpod/podman type Info struct { - Host *HostInfo `json:"host"` - Store *StoreInfo `json:"store"` - Registries map[string]interface{} `json:"registries"` - Plugins Plugins `json:"plugins"` - Version Version `json:"version"` + Host *HostInfo `json:"host"` + Store *StoreInfo `json:"store"` + Registries map[string]any `json:"registries"` + Plugins Plugins `json:"plugins"` + Version Version `json:"version"` } // Version is an output struct for API @@ -121,8 +121,8 @@ type HostInfo struct { OCIRuntime *OCIRuntimeInfo `json:"ociRuntime"` OS string `json:"os"` // RemoteSocket returns the UNIX domain socket the Podman service is listening on - RemoteSocket *RemoteSocket `json:"remoteSocket,omitempty"` - RuntimeInfo map[string]interface{} `json:"runtimeInfo,omitempty"` + RemoteSocket *RemoteSocket `json:"remoteSocket,omitempty"` + RuntimeInfo map[string]any `json:"runtimeInfo,omitempty"` // ServiceIsRemote is true when the podman/libpod service is remote to the client ServiceIsRemote bool `json:"serviceIsRemote"` Security SecurityInfo `json:"security"` @@ -179,11 +179,11 @@ type OCIRuntimeInfo struct { // StoreInfo describes the container storage and its // attributes type StoreInfo struct { - ConfigFile string `json:"configFile"` - ContainerStore ContainerStore `json:"containerStore"` - GraphDriverName string `json:"graphDriverName"` - GraphOptions map[string]interface{} `json:"graphOptions"` - GraphRoot string `json:"graphRoot"` + ConfigFile string `json:"configFile"` + ContainerStore ContainerStore `json:"containerStore"` + GraphDriverName string `json:"graphDriverName"` + GraphOptions map[string]any `json:"graphOptions"` + GraphRoot string `json:"graphRoot"` // GraphRootAllocated is how much space the graphroot has in bytes GraphRootAllocated uint64 `json:"graphRootAllocated"` // GraphRootUsed is how much of graphroot is used in bytes diff --git a/vendor/github.com/containers/common/pkg/ssh/utils.go b/vendor/github.com/containers/common/pkg/ssh/utils.go index d47a9859c..0aa76d653 100644 --- a/vendor/github.com/containers/common/pkg/ssh/utils.go +++ b/vendor/github.com/containers/common/pkg/ssh/utils.go @@ -1,6 +1,7 @@ package ssh import ( + "errors" "fmt" "io" "net" @@ -80,7 +81,7 @@ func ReadPassword(prompt string) (pw []byte, err error) { pw = append(pw, b[0]) // limit size, so that a wrong input won't fill up the memory if len(pw) > 1024 { - err = fmt.Errorf("password too long, 1024 byte limit") + err = errors.New("password too long, 1024 byte limit") } } if err != nil { @@ -156,7 +157,7 @@ func ParseScpArgs(options ConnectionScpOptions) (string, string, string, bool, e } else { split = strings.Split(host, ":") if len(split) != 2 { - return "", "", "", false, fmt.Errorf("no remote destination provided") + return "", "", "", false, errors.New("no remote destination provided") } host = split[0] remotePath = split[1] diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go index 6ba2154a7..6845914aa 100644 --- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -212,7 +212,7 @@ func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, } func rchown(chowndir string, uid, gid int) error { - return 
filepath.Walk(chowndir, func(filePath string, f os.FileInfo, err error) error { + return filepath.Walk(chowndir, func(filePath string, _ os.FileInfo, err error) error { return os.Lchown(filePath, uid, gid) }) } @@ -231,7 +231,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string fileInfo, err := os.Stat(hostDirOrFile) if err != nil { if errors.Is(err, os.ErrNotExist) { - logrus.Warnf("Path %q from %q doesn't exist, skipping", hostDirOrFile, filePath) + logrus.Infof("Path %q from %q doesn't exist, skipping", hostDirOrFile, filePath) continue } return nil, err diff --git a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go index 6ae9a4160..c21a6aa52 100644 --- a/vendor/github.com/containers/common/pkg/supplemented/supplemented.go +++ b/vendor/github.com/containers/common/pkg/supplemented/supplemented.go @@ -3,6 +3,7 @@ package supplemented import ( "container/list" "context" + "errors" "fmt" "io" @@ -14,6 +15,7 @@ import ( multierror "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // supplementedImageReference groups multiple references together. @@ -139,7 +141,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty } sources[manifestDigest] = src - // Parse the manifest as a list of images. + // Parse the manifest as a list of images and artifacts. list, err := manifest.ListFromBlob(manifestBytes, manifestType) if err != nil { return fmt.Errorf("parsing manifest blob %q as a %q: %w", string(manifestBytes), manifestType, err) @@ -155,7 +157,11 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty } chaseInstances = []digest.Digest{instance} case cp.CopySpecificImages: - chaseInstances = s.instances + for _, instance := range list.Instances() { + if slices.Contains(s.instances, instance) { + chaseInstances = append(chaseInstances, instance) + } + } case cp.CopyAllImages: chaseInstances = list.Instances() } @@ -281,7 +287,7 @@ func (s *supplementedImageReference) NewImageSource(ctx context.Context, sys *ty } func (s *supplementedImageReference) DeleteImage(_ context.Context, _ *types.SystemContext) error { - return fmt.Errorf("deletion of images not implemented") + return errors.New("deletion of images not implemented") } func (s *supplementedImageSource) Close() error { diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go index d77e52f60..5e680957b 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_other.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_other.go index 26c543c4a..fdb0586e5 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_other.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_other.go @@ -1,5 +1,4 @@ //go:build !linux && !windows -// +build !linux,!windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go index 9f354eb10..13c42d280 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go +++ 
b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go index 018c488be..739a7ffb1 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go b/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go index c9e4184aa..2b6bbaf2d 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go @@ -1,5 +1,4 @@ //go:build (windows && ignore) || osx -// +build windows,ignore osx package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go index af1c77d60..bf4b3104e 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go @@ -1,5 +1,4 @@ //go:build solaris && cgo -// +build solaris,cgo package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go index 4aa9401f6..ce6ac0232 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go @@ -1,5 +1,4 @@ //go:build !linux && !solaris && !windows -// +build !linux,!solaris,!windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go index 455a8892f..e89d18a06 100644 --- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go +++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package sysinfo diff --git a/vendor/github.com/containers/common/pkg/systemd/systemd_linux.go b/vendor/github.com/containers/common/pkg/systemd/systemd_linux.go new file mode 100644 index 000000000..c1d8ed72e --- /dev/null +++ b/vendor/github.com/containers/common/pkg/systemd/systemd_linux.go @@ -0,0 +1,151 @@ +package systemd + +import ( + "context" + "crypto/rand" + "fmt" + "os" + "strconv" + "sync" + + "github.com/containers/common/pkg/cgroups" + "github.com/containers/storage/pkg/unshare" + systemdDbus "github.com/coreos/go-systemd/v22/dbus" + "github.com/godbus/dbus/v5" + "github.com/sirupsen/logrus" +) + +var ( + runsOnSystemdOnce sync.Once + runsOnSystemd bool +) + +// RunsOnSystemd returns whether the system is using systemd +func RunsOnSystemd() bool { + runsOnSystemdOnce.Do(func() { + // per sd_booted(3), check for this dir + fd, err := os.Stat("/run/systemd/system") + runsOnSystemd = err == nil && fd.IsDir() + }) + return runsOnSystemd +} + +func moveProcessPIDFileToScope(pidPath, slice, scope string) error { + data, err := os.ReadFile(pidPath) + if err != nil { + // do not raise an error if the file doesn't exist + if os.IsNotExist(err) { + return nil + } + return fmt.Errorf("cannot read pid file: %w", err) + } + pid, err := strconv.ParseUint(string(data), 10, 0) + if err != nil { + return fmt.Errorf("cannot parse pid file %s: %w", 
pidPath, err) + } + + return moveProcessToScope(int(pid), slice, scope) +} + +func moveProcessToScope(pid int, slice, scope string) error { + err := RunUnderSystemdScope(pid, slice, scope) + // If the PID is not valid anymore, do not return an error. + if dbusErr, ok := err.(dbus.Error); ok { + if dbusErr.Name == "org.freedesktop.DBus.Error.UnixProcessIdUnknown" { + return nil + } + } + return err +} + +// MoveRootlessNetnsSlirpProcessToUserSlice moves the slirp4netns process for the rootless netns +// into a different scope so that systemd does not kill it with a container. +func MoveRootlessNetnsSlirpProcessToUserSlice(pid int) error { + randBytes := make([]byte, 4) + _, err := rand.Read(randBytes) + if err != nil { + return err + } + return moveProcessToScope(pid, "user.slice", fmt.Sprintf("rootless-netns-%x.scope", randBytes)) +} + +// MovePauseProcessToScope moves the pause process used for rootless mode to keep the namespaces alive to +// a separate scope. +func MovePauseProcessToScope(pausePidPath string) { + var err error + + for i := 0; i < 10; i++ { + randBytes := make([]byte, 4) + _, err = rand.Read(randBytes) + if err != nil { + logrus.Errorf("failed to read random bytes: %v", err) + continue + } + err = moveProcessPIDFileToScope(pausePidPath, "user.slice", fmt.Sprintf("podman-pause-%x.scope", randBytes)) + if err == nil { + return + } + } + + if err != nil { + unified, err2 := cgroups.IsCgroup2UnifiedMode() + if err2 != nil { + logrus.Warnf("Failed to detect if running with cgroup unified: %v", err) + } + if RunsOnSystemd() && unified { + logrus.Warnf("Failed to add pause process to systemd sandbox cgroup: %v", err) + } else { + logrus.Debugf("Failed to add pause process to systemd sandbox cgroup: %v", err) + } + } +} + +// RunUnderSystemdScope adds the specified pid to a systemd scope +func RunUnderSystemdScope(pid int, slice string, unitName string) error { + var properties []systemdDbus.Property + var conn *systemdDbus.Conn + var err error + + if unshare.GetRootlessUID() != 0 { + conn, err = cgroups.UserConnection(unshare.GetRootlessUID()) + if err != nil { + return err + } + } else { + conn, err = systemdDbus.NewWithContext(context.Background()) + if err != nil { + return err + } + } + defer conn.Close() + properties = append(properties, systemdDbus.PropSlice(slice)) + properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) + properties = append(properties, newProp("Delegate", true)) + properties = append(properties, newProp("DefaultDependencies", false)) + ch := make(chan string) + _, err = conn.StartTransientUnitContext(context.Background(), unitName, "replace", properties, ch) + if err != nil { + // On errors check if the cgroup already exists, if it does move the process there + if props, err := conn.GetUnitTypePropertiesContext(context.Background(), unitName, "Scope"); err == nil { + if cgroup, ok := props["ControlGroup"].(string); ok && cgroup != "" { + if err := cgroups.MoveUnderCgroup(cgroup, "", []uint32{uint32(pid)}); err == nil { + return nil + } + // On errors return the original error message we got from StartTransientUnit. 
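MovePauseProcessToScope above retries with a fresh crypto/rand suffix on each attempt, and RunUnderSystemdScope starts a transient scope over D-Bus that delegates the cgroup and adopts the target PID. A condensed sketch of that flow against the same go-systemd API (the function and unit names are mine; the already-exists recovery path is left out):

```go
//go:build linux

package systemdutil

import (
	"context"
	"crypto/rand"
	"fmt"

	systemdDbus "github.com/coreos/go-systemd/v22/dbus"
	"github.com/godbus/dbus/v5"
)

func newProp(name string, value any) systemdDbus.Property {
	return systemdDbus.Property{Name: name, Value: dbus.MakeVariant(value)}
}

// moveToFreshScope adopts pid into a transient scope with a random
// suffix, mirroring MovePauseProcessToScope + RunUnderSystemdScope.
func moveToFreshScope(pid int, slice string) error {
	suffix := make([]byte, 4)
	if _, err := rand.Read(suffix); err != nil {
		return err
	}
	unit := fmt.Sprintf("example-%x.scope", suffix) // hypothetical unit name

	conn, err := systemdDbus.NewWithContext(context.Background())
	if err != nil {
		return err
	}
	defer conn.Close()

	props := []systemdDbus.Property{
		systemdDbus.PropSlice(slice),
		newProp("PIDs", []uint32{uint32(pid)}),
		newProp("Delegate", true),
	}
	ch := make(chan string)
	if _, err := conn.StartTransientUnitContext(context.Background(), unit, "replace", props, ch); err != nil {
		return err
	}
	<-ch // block until systemd reports the job started
	return nil
}
```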
+ } + } + return err + } + + // Block until job is started + <-ch + + return nil +} + +func newProp(name string, units any) systemdDbus.Property { + return systemdDbus.Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} diff --git a/vendor/github.com/containers/common/pkg/systemd/systemd_unsupported.go b/vendor/github.com/containers/common/pkg/systemd/systemd_unsupported.go new file mode 100644 index 000000000..e4a628527 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/systemd/systemd_unsupported.go @@ -0,0 +1,15 @@ +//go:build !linux + +package systemd + +import "errors" + +func RunsOnSystemd() bool { + return false +} + +func MovePauseProcessToScope(pausePidPath string) {} + +func RunUnderSystemdScope(pid int, slice string, unitName string) error { + return errors.New("RunUnderSystemdScope not supported on this OS") +} diff --git a/vendor/github.com/containers/common/pkg/timezone/timezone.go b/vendor/github.com/containers/common/pkg/timezone/timezone.go new file mode 100644 index 000000000..4586e0e1e --- /dev/null +++ b/vendor/github.com/containers/common/pkg/timezone/timezone.go @@ -0,0 +1,103 @@ +//go:build !windows + +package timezone + +import ( + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + + securejoin "github.com/cyphar/filepath-securejoin" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// ConfigureContainerTimeZone configure the time zone for a container. +// It returns the path of the created /etc/localtime file if needed. +func ConfigureContainerTimeZone(timezone, containerRunDir, mountPoint, etcPath, containerID string) (localTimePath string, err error) { + var timezonePath string + switch { + case timezone == "": + return "", nil + case os.Getenv("TZDIR") != "": + // Allow using TZDIR per: + // https://sourceware.org/git/?p=glibc.git;a=blob;f=time/tzfile.c;h=8a923d0cccc927a106dc3e3c641be310893bab4e;hb=HEAD#l149 + + timezonePath = filepath.Join(os.Getenv("TZDIR"), timezone) + case timezone == "local": + timezonePath, err = filepath.EvalSymlinks("/etc/localtime") + if err != nil { + return "", fmt.Errorf("finding local timezone for container %s: %w", containerID, err) + } + default: + timezonePath = filepath.Join("/usr/share/zoneinfo", timezone) + } + + etcFd, err := openDirectory(etcPath) + if err != nil { + return "", fmt.Errorf("open /etc in the container: %w", err) + } + defer unix.Close(etcFd) + + // Make sure to remove any existing localtime file in the container to not create invalid links + err = unix.Unlinkat(etcFd, "localtime", 0) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return "", fmt.Errorf("removing /etc/localtime: %w", err) + } + + hostPath, err := securejoin.SecureJoin(mountPoint, timezonePath) + if err != nil { + return "", fmt.Errorf("resolve zoneinfo path in the container: %w", err) + } + + var localtimePath string + if _, err := os.Stat(hostPath); err != nil { + // File does not exist, which means tzdata is not installed in the container. + // Create /etc/localtime as a copy from the host. 
+ logrus.Debugf("Timezone %s does not exist in the container, create our own copy from the host", timezonePath) + localtimePath, err = copyTimezoneFile(containerRunDir, timezonePath) + if err != nil { + return "", fmt.Errorf("setting timezone for container %s: %w", containerID, err) + } + } else { + // File exists, let's create a symlink according to localtime(5) + logrus.Debugf("Create localtime symlink for %s", timezonePath) + err = unix.Symlinkat(".."+timezonePath, etcFd, "localtime") + if err != nil { + return "", fmt.Errorf("creating /etc/localtime symlink: %w", err) + } + } + return localtimePath, nil +} + +// copyTimezoneFile copies the timezone file from the host to the container. +func copyTimezoneFile(containerRunDir, zonePath string) (string, error) { + localtimeCopy := filepath.Join(containerRunDir, "localtime") + file, err := os.Stat(zonePath) + if err != nil { + return "", err + } + if file.IsDir() { + return "", errors.New("invalid timezone: is a directory") + } + src, err := os.Open(zonePath) + if err != nil { + return "", err + } + defer src.Close() + + dest, err := os.Create(localtimeCopy) + if err != nil { + return "", err + } + defer dest.Close() + + _, err = io.Copy(dest, src) + if err != nil { + return "", err + } + return localtimeCopy, err +} diff --git a/vendor/github.com/containers/common/pkg/timezone/timezone_linux.go b/vendor/github.com/containers/common/pkg/timezone/timezone_linux.go new file mode 100644 index 000000000..ef096af59 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/timezone/timezone_linux.go @@ -0,0 +1,9 @@ +package timezone + +import ( + "golang.org/x/sys/unix" +) + +func openDirectory(path string) (fd int, err error) { + return unix.Open(path, unix.O_RDONLY|unix.O_PATH|unix.O_CLOEXEC, 0) +} diff --git a/vendor/github.com/containers/common/pkg/timezone/timezone_unix.go b/vendor/github.com/containers/common/pkg/timezone/timezone_unix.go new file mode 100644 index 000000000..bb57036f8 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/timezone/timezone_unix.go @@ -0,0 +1,12 @@ +//go:build !windows && !linux + +package timezone + +import ( + "golang.org/x/sys/unix" +) + +func openDirectory(path string) (fd int, err error) { + const O_PATH = 0x00400000 + return unix.Open(path, unix.O_RDONLY|O_PATH|unix.O_CLOEXEC, 0) +} diff --git a/vendor/github.com/containers/common/pkg/timezone/timezone_windows.go b/vendor/github.com/containers/common/pkg/timezone/timezone_windows.go new file mode 100644 index 000000000..d89090eeb --- /dev/null +++ b/vendor/github.com/containers/common/pkg/timezone/timezone_windows.go @@ -0,0 +1,5 @@ +package timezone + +func ConfigureContainerTimeZone(timezone, containerRunDir, mountPoint, etcPath, containerID string) (string, error) { + return "", nil +} diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unix.go b/vendor/github.com/containers/common/pkg/umask/umask_unix.go index 4f5527cb6..caa60c8e6 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask_unix.go +++ b/vendor/github.com/containers/common/pkg/umask/umask_unix.go @@ -1,5 +1,4 @@ //go:build linux || darwin -// +build linux darwin package umask diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go index cf76ea1d3..547dea868 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go +++ b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !darwin -// +build 
!linux,!darwin package umask diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index 708472bac..8229296d7 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -9,16 +9,14 @@ import ( "github.com/fsnotify/fsnotify" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) -// StringInSlice determines if a string is in a string slice, returns bool +// StringInSlice determines if a string is in a string slice, returns bool. +// +// Deprecated: Use [golang.org/x/exp/slices.Contains] instead. func StringInSlice(s string, sl []string) bool { - for _, i := range sl { - if i == s { - return true - } - } - return false + return slices.Contains(sl, s) } // StringMatchRegexSlice determines if a given string matches one of the given regexes, returns bool diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go deleted file mode 100644 index 0cd53af53..000000000 --- a/vendor/github.com/containers/common/pkg/util/util_supported.go +++ /dev/null @@ -1,91 +0,0 @@ -//go:build linux || darwin || freebsd -// +build linux darwin freebsd - -package util - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sync" - "syscall" - - "github.com/containers/storage/pkg/homedir" - "github.com/containers/storage/pkg/unshare" - "github.com/sirupsen/logrus" -) - -var ( - rootlessRuntimeDirOnce sync.Once - rootlessRuntimeDir string -) - -// isWriteableOnlyByOwner checks that the specified permission mask allows write -// access only to the owner. -func isWriteableOnlyByOwner(perm os.FileMode) bool { - return (perm & 0o722) == 0o700 -} - -// GetRuntimeDir returns the runtime directory -func GetRuntimeDir() (string, error) { - var rootlessRuntimeDirError error - - rootlessRuntimeDirOnce.Do(func() { - runtimeDir, err := homedir.GetRuntimeDir() - if err != nil { - logrus.Debug(err) - } - if runtimeDir != "" { - st, err := os.Stat(runtimeDir) - if err != nil { - rootlessRuntimeDirError = err - return - } - if int(st.Sys().(*syscall.Stat_t).Uid) != os.Geteuid() { - rootlessRuntimeDirError = fmt.Errorf("XDG_RUNTIME_DIR directory %q is not owned by the current user", runtimeDir) - return - } - } - uid := fmt.Sprintf("%d", unshare.GetRootlessUID()) - if runtimeDir == "" { - tmpDir := filepath.Join("/run", "user", uid) - if err := os.MkdirAll(tmpDir, 0o700); err != nil { - logrus.Debugf("unable to make temp dir: %v", err) - } - st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { - runtimeDir = tmpDir - } - } - if runtimeDir == "" { - tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid)) - if err := os.MkdirAll(tmpDir, 0o700); err != nil { - logrus.Debugf("unable to make temp dir %v", err) - } - st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { - runtimeDir = tmpDir - } - } - if runtimeDir == "" { - home := os.Getenv("HOME") - if home == "" { - rootlessRuntimeDirError = errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty") - return - } - resolvedHome, err := filepath.EvalSymlinks(home) - if err != nil { - rootlessRuntimeDirError = fmt.Errorf("cannot resolve home: %w", err) - return - } - runtimeDir = filepath.Join(resolvedHome, "rundir") - } - rootlessRuntimeDir = runtimeDir - }) - - if 
rootlessRuntimeDirError != nil { - return "", rootlessRuntimeDirError - } - return rootlessRuntimeDir, nil -} diff --git a/vendor/github.com/containers/common/pkg/util/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go deleted file mode 100644 index 1525bdc34..000000000 --- a/vendor/github.com/containers/common/pkg/util/util_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build windows -// +build windows - -package util - -import ( - "errors" -) - -// getRuntimeDir returns the runtime directory -func GetRuntimeDir() (string, error) { - return "", errors.New("this function is not implemented for windows") -} diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index 19ba92c0f..cd77bf892 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.57.4" +const Version = "0.58.0" diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index a42e3b67a..1706f7116 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -23,9 +23,9 @@ var ( // compressionBufferSize is the buffer size used to compress a blob compressionBufferSize = 1048576 - // expectedCompressionFormats is used to check if a blob with a specified media type is compressed + // expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed // using the algorithm that the media type says it should be compressed with - expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ + expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{ imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, @@ -62,15 +62,16 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI res.srcCompressorName = internalblobinfocache.Uncompressed } - if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() { - logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name()) + if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() { + logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedBaseFormat.Name(), format.Name()) } return res, nil } // bpCompressionStepData contains data that the copy pipeline needs about the compression step. type bpCompressionStepData struct { - operation types.LayerCompression // Operation to use for updating the blob metadata. + operation bpcOperation // What we are actually doing + uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. 
uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. srcCompressorName string // Compressor name to record in the blob info cache for the source blob. @@ -78,6 +79,18 @@ type bpCompressionStepData struct { closers []io.Closer // Objects to close after the upload is done, if any. } +type bpcOperation int + +const ( + bpcOpInvalid bpcOperation = iota + bpcOpPreserveOpaque // We are preserving something where compression is not applicable + bpcOpPreserveCompressed // We are preserving a compressed, and decompressible, layer + bpcOpPreserveUncompressed // We are preserving an uncompressed, and compressible, layer + bpcOpCompressUncompressed // We are compressing uncompressed data + bpcOpRecompressCompressed // We are recompressing compressed data + bpcOpDecompressCompressed // We are decompressing compressed data +) + // blobPipelineCompressionStep updates *stream to compress and/or decompress it. // srcInfo is primarily used for error messages. // Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedBlobData, @@ -112,10 +125,11 @@ func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModi // bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so. func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) { if isOciEncrypted(stream.info.MediaType) { + // We can’t do anything with an encrypted blob unless decrypted. logrus.Debugf("Using original blob without modification for encrypted blob") - // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted return &bpCompressionStepData{ - operation: types.PreserveOriginal, + operation: bpcOpPreserveOpaque, + uploadedOperation: types.PreserveOriginal, uploadedAlgorithm: nil, srcCompressorName: internalblobinfocache.UnknownCompression, uploadedCompressorName: internalblobinfocache.UnknownCompression, @@ -143,7 +157,8 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp Size: -1, } return &bpCompressionStepData{ - operation: types.Compress, + operation: bpcOpCompressUncompressed, + uploadedOperation: types.Compress, uploadedAlgorithm: uploadedAlgorithm, uploadedAnnotations: annotations, srcCompressorName: detected.srcCompressorName, @@ -157,7 +172,8 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp // bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so. func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed && - ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() { + ic.compressionFormat != nil && + (ic.compressionFormat.Name() != detected.format.Name() && ic.compressionFormat.Name() != detected.format.BaseVariantName()) { // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally // re-compressed using the desired format. 
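The condition above is where the new base-variant awareness pays off: a zstd:chunked blob is no longer rewritten when plain zstd is requested, because both share the zstd base variant. A minimal, standalone sketch of that comparison (not the vendored code), using only exported API this diff already vendors (compression.Zstd, compression.ZstdChunked, Algorithm.BaseVariantName):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
)

// needsConversion mirrors the widened check: convert only when the desired
// format matches neither the detected format nor its base variant.
func needsConversion(desired, detected compressiontypes.Algorithm) bool {
	return desired.Name() != detected.Name() &&
		desired.Name() != detected.BaseVariantName()
}

func main() {
	// zstd:chunked reports "zstd" as its base variant, so no rewrite is needed.
	fmt.Println(needsConversion(compression.Zstd, compression.ZstdChunked)) // false
	// gzip shares no variant with zstd, so the blob must be converted.
	fmt.Println(needsConversion(compression.Zstd, compression.Gzip)) // true
}
```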
logrus.Debugf("Blob will be converted") @@ -182,7 +198,8 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp } succeeded = true return &bpCompressionStepData{ - operation: types.PreserveOriginal, + operation: bpcOpRecompressCompressed, + uploadedOperation: types.PreserveOriginal, uploadedAlgorithm: ic.compressionFormat, uploadedAnnotations: annotations, srcCompressorName: detected.srcCompressorName, @@ -208,7 +225,8 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp Size: -1, } return &bpCompressionStepData{ - operation: types.Decompress, + operation: bpcOpDecompressCompressed, + uploadedOperation: types.Decompress, uploadedAlgorithm: nil, srcCompressorName: detected.srcCompressorName, uploadedCompressorName: internalblobinfocache.Uncompressed, @@ -232,14 +250,26 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom // But don’t touch blobs in objects where we can’t change compression, // so that src.UpdatedImage() doesn’t fail; assume that for such blobs // LayerInfosForCopy() should not be making any changes in the first place. + var bpcOp bpcOperation + var uploadedOp types.LayerCompression var algorithm *compressiontypes.Algorithm - if layerCompressionChangeSupported && detected.isCompressed { + switch { + case !layerCompressionChangeSupported: + bpcOp = bpcOpPreserveOpaque + uploadedOp = types.PreserveOriginal + algorithm = nil + case detected.isCompressed: + bpcOp = bpcOpPreserveCompressed + uploadedOp = types.PreserveOriginal algorithm = &detected.format - } else { + default: + bpcOp = bpcOpPreserveUncompressed + uploadedOp = types.Decompress algorithm = nil } return &bpCompressionStepData{ - operation: types.PreserveOriginal, + operation: bpcOp, + uploadedOperation: uploadedOp, uploadedAlgorithm: algorithm, srcCompressorName: detected.srcCompressorName, uploadedCompressorName: detected.srcCompressorName, @@ -248,7 +278,7 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom // updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary. func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) { - *operation = d.operation + *operation = d.uploadedOperation // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. *algorithm = d.uploadedAlgorithm if *annotations == nil { @@ -257,7 +287,8 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom maps.Copy(*annotations, d.uploadedAnnotations) } -// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo adnd the original srcInfo. +// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob) +// and the original srcInfo (which the caller guarantees has been validated). // This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties. func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo, encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error { @@ -268,17 +299,26 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf // in the blob info cache (which would probably be necessary for any more complex logic), // and the simplicity is attractive. 
if !encryptionStep.encrypting && !decryptionStep.decrypting { - // If d.operation != types.PreserveOriginal, we now have two reliable digest values: + // If d.operation != bpcOpPreserve*, we now have two reliable digest values: // srcinfo.Digest describes the pre-d.operation input, verified by digestingReader // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob - // (because stream.info.Digest == "", this must have been computed afresh). + // (because we set stream.info.Digest == "", this must have been computed afresh). switch d.operation { - case types.PreserveOriginal: - break // Do nothing, we have only one digest and we might not have even verified it. - case types.Compress: + case bpcOpPreserveOpaque: + // No useful information + case bpcOpCompressUncompressed: c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) - case types.Decompress: + case bpcOpDecompressCompressed: c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) + case bpcOpRecompressCompressed, bpcOpPreserveCompressed: + // We know one or two compressed digests. BlobInfoCache associates compression variants via the uncompressed digest, + // and we don’t know that one. + // That also means that repeated copies with the same recompression don’t identify reuse opportunities (unless + // RecordDigestUncompressedPair was called for both compressed variants for some other reason). + case bpcOpPreserveUncompressed: + c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, srcInfo.Digest) + case bpcOpInvalid: + fallthrough default: return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation) } @@ -286,7 +326,7 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression { if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName { // HACK: Don’t record zstd:chunked algorithms. - // There is already a similar hack in internal/imagedestination/impl/helpers.BlobMatchesRequiredCompression, + // There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions, // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless. 
// // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate diff --git a/vendor/github.com/containers/image/v5/copy/digesting_reader.go b/vendor/github.com/containers/image/v5/copy/digesting_reader.go index 901d10826..4c6ba82ee 100644 --- a/vendor/github.com/containers/image/v5/copy/digesting_reader.go +++ b/vendor/github.com/containers/image/v5/copy/digesting_reader.go @@ -23,11 +23,11 @@ type digestingReader struct { func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { var digester digest.Digester if err := expectedDigest.Validate(); err != nil { - return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest) + return nil, fmt.Errorf("invalid digest specification %q: %w", expectedDigest, err) } digestAlgorithm := expectedDigest.Algorithm() if !digestAlgorithm.Available() { - return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) + return nil, fmt.Errorf("invalid digest specification %q: unsupported digest algorithm %q", expectedDigest, digestAlgorithm) } digester = digestAlgorithm.Digester() diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index 6f01cf5cc..60ea92aae 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -6,8 +6,10 @@ import ( "fmt" "strings" + internalManifest "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" @@ -19,8 +21,8 @@ import ( // Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} -// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption. -var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest} +// allManifestMIMETypes lists all possible manifest MIME types. +var allManifestMIMETypes = []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType} // orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once. type orderedSet struct { @@ -51,9 +53,10 @@ type determineManifestConversionInputs struct { destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes() - forceManifestMIMEType string // User’s choice of forced manifest MIME type - requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption - cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + forceManifestMIMEType string // User’s choice of forced manifest MIME type + requestedCompressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user _explicitly_ requested one.
+ requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can } // manifestConversionPlan contains the decisions made by determineManifestConversion. @@ -79,42 +82,68 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest if in.forceManifestMIMEType != "" { destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType} } - if len(destSupportedManifestMIMETypes) == 0 { - if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) { - return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions. - preferredMIMEType: srcType, - otherMIMETypeCandidates: []string{}, - }, nil - } - destSupportedManifestMIMETypes = ociEncryptionMIMETypes + destSupportedManifestMIMETypes = allManifestMIMETypes } + + restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat) supportedByDest := set.New[string]() for _, t := range destSupportedManifestMIMETypes { - if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) { - supportedByDest.Add(t) + if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) { + continue + } + if restrictiveCompressionRequired && !internalManifest.MIMETypeSupportsCompressionAlgorithm(t, *in.requestedCompressionFormat) { + continue } + supportedByDest.Add(t) } if supportedByDest.Empty() { - if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes + if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by allManifestMIMETypes return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty") } - // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved. - if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption. - return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting") - } + // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so some filtering of supported MIME types must have been involved. + // destSupportedManifestMIMETypes has three possible origins: if in.forceManifestMIMEType != "" { // 1. 
forceManifestType specified - return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption", - in.forceManifestMIMEType) + switch { + case in.requiresOCIEncryption && restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required together with format %s, which does not support both", + in.requestedCompressionFormat.Name(), in.forceManifestMIMEType) + case in.requiresOCIEncryption: + return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption", + in.forceManifestMIMEType) + case restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s required together with format %s, which does not support it", + in.requestedCompressionFormat.Name(), in.forceManifestMIMEType) + default: + return manifestConversionPlan{}, errors.New("internal error: forceManifestMIMEType was rejected for an unknown reason") + } + } + if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen allManifestMIMETypes + if !restrictiveCompressionRequired { + // Coverage: This should never happen. + // If we have not rejected for compression reasons, we must have rejected due to encryption, but + // allManifestMIMETypes includes OCI, which supports encryption. + return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well") + } + // This can legitimately happen when the user asks for completely unsupported formats like Bzip2 or Xz. + return manifestConversionPlan{}, fmt.Errorf("compression using %s required, but none of the known manifest formats support it", in.requestedCompressionFormat.Name()) } - if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes - // Coverage: This should never happen, ociEncryptionMIMETypes all support encryption - return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well") + // 3. destination accepts a restricted list of mime types + destMIMEList := strings.Join(destSupportedManifestMIMETypes, ", ") + switch { + case in.requiresOCIEncryption && restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required but the destination only supports MIME types [%s], none of which support both", + in.requestedCompressionFormat.Name(), destMIMEList) + case in.requiresOCIEncryption: + return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", + destMIMEList) + case restrictiveCompressionRequired: + return manifestConversionPlan{}, fmt.Errorf("compression using %s required but the destination only supports MIME types [%s], none of which support it", + in.requestedCompressionFormat.Name(), destMIMEList) + default: // Coverage: This should never happen, we only filter for in.requiresOCIEncryption || restrictiveCompressionRequired + return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and we are neither encrypting nor requiring a restrictive compression algorithm") } - // 3. destination does not support encryption.
- return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", - strings.Join(destSupportedManifestMIMETypes, ", ")) } // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. @@ -156,7 +185,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest } logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) - if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen. + if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen. return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types") } res := manifestConversionPlan{ diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index f252e3476..a219b58b6 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -38,6 +38,7 @@ type instanceCopy struct { // Fields which can be used by callers when operation // is `instanceCopyClone` + cloneArtifactType string cloneCompressionVariant OptionCompressionVariant clonePlatform *imgspecv1.Platform cloneAnnotations map[string]string @@ -142,6 +143,7 @@ func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest. res = append(res, instanceCopy{ op: instanceCopyClone, sourceDigest: instanceDigest, + cloneArtifactType: instanceDetails.ReadOnly.ArtifactType, cloneCompressionVariant: compressionVariant, clonePlatform: instanceDetails.ReadOnly.Platform, cloneAnnotations: maps.Clone(instanceDetails.ReadOnly.Annotations), @@ -268,6 +270,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, AddDigest: updated.manifestDigest, AddSize: int64(len(updated.manifest)), AddMediaType: updated.manifestMIMEType, + AddArtifactType: instance.cloneArtifactType, AddPlatform: instance.clonePlatform, AddAnnotations: instance.cloneAnnotations, AddCompressionAlgorithms: updated.compressionAlgorithms, diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go index ce078234c..3ac93a9d7 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_bars.go +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "time" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" @@ -148,13 +149,14 @@ type blobChunkAccessorProxy struct { // The readers must be fully consumed, in the order they are returned, before blocking // to read the next chunk. 
func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + start := time.Now() rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) if err == nil { total := int64(0) for _, c := range chunks { total += int64(c.Length) } - s.bar.IncrInt64(total) + s.bar.EwmaIncrInt64(total, time.Since(start)) } return rc, errs, err } diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index 67ca43f7b..e1a43f75e 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -20,6 +20,7 @@ import ( compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" + chunkedToc "github.com/containers/storage/pkg/chunked/toc" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" @@ -32,6 +33,7 @@ type imageCopier struct { c *copier manifestUpdates *types.ManifestUpdateOptions src *image.SourcedImage + manifestConversionPlan manifestConversionPlan diffIDsAreNeeded bool cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can canSubstituteBlobs bool @@ -135,7 +137,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar c: c, manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, src: src, - // diffIDsAreNeeded is computed later + // manifestConversionPlan and diffIDsAreNeeded are computed later cannotModifyManifestReason: cannotModifyManifestReason, requireCompressionFormatMatch: opts.requireCompressionFormatMatch, } @@ -163,10 +165,11 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil - manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{ + ic.manifestConversionPlan, err = determineManifestConversion(determineManifestConversionInputs{ srcMIMEType: ic.src.ManifestMIMEType, destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), forceManifestMIMEType: c.options.ForceManifestMIMEType, + requestedCompressionFormat: ic.compressionFormat, requiresOCIEncryption: destRequiresOciEncryption, cannotModifyManifestReason: ic.cannotModifyManifestReason, }) @@ -177,8 +180,8 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based // on the expected destination format. - if manifestConversionPlan.preferredMIMETypeNeedsConversion { - ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType + if ic.manifestConversionPlan.preferredMIMETypeNeedsConversion { + ic.manifestUpdates.ManifestMIMEType = ic.manifestConversionPlan.preferredMIMEType } // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. 
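The determineManifestConversion call above is where the copy pipeline now feeds the explicitly requested compression into format selection. Conceptually it reduces to the two-filter pass sketched below; this is a paraphrase for illustration, with the predicate parameters standing in for manifest.MIMETypeSupportsEncryption and the internal MIMETypeSupportsCompressionAlgorithm added later in this diff, not the vendored implementation:

```go
package main

import "fmt"

// filterMIMETypes keeps only the manifest MIME types that satisfy every active
// restriction; an empty result is what triggers the detailed errors above.
func filterMIMETypes(candidates []string, needEncryption, needRestrictiveCompression bool,
	supportsEncryption, supportsCompression func(mimeType string) bool) []string {
	var out []string
	for _, t := range candidates {
		if needEncryption && !supportsEncryption(t) {
			continue
		}
		if needRestrictiveCompression && !supportsCompression(t) {
			continue
		}
		out = append(out, t)
	}
	return out
}

func main() {
	oci := "application/vnd.oci.image.manifest.v1+json"
	v2s2 := "application/vnd.docker.distribution.manifest.v2+json"
	// Only the OCI format supports both OCI encryption and zstd layers,
	// so requiring both filters the docker schema 2 type out.
	fmt.Println(filterMIMETypes([]string{oci, v2s2}, true, true,
		func(mt string) bool { return mt == oci },
		func(mt string) bool { return mt == oci }))
}
```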
@@ -217,11 +220,11 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) wipResult := copySingleImageResult{ manifest: manifestBytes, - manifestMIMEType: manifestConversionPlan.preferredMIMEType, + manifestMIMEType: ic.manifestConversionPlan.preferredMIMEType, manifestDigest: manifestDigest, } if err != nil { - logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err) + logrus.Debugf("Writing manifest using preferred type %s failed: %v", ic.manifestConversionPlan.preferredMIMEType, err) // … if it fails, and the failure is either because the manifest is rejected by the registry, or // because we failed to create a manifest of the specified type because the specific manifest type // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may @@ -230,13 +233,13 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError isManifestRejected := errors.As(err, &manifestTypeRejectedError) isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError) - if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 { + if (!isManifestRejected && !isCompressionIncompatible) || len(ic.manifestConversionPlan.otherMIMETypeCandidates) == 0 { // We don’t have other options. // In principle the code below would handle this as well, but the resulting error message is fairly ugly. // Don’t bother the user with MIME types if we have no choice. return copySingleImageResult{}, err } - // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType. + // If the original MIME type is acceptable, determineManifestConversion always uses it as ic.manifestConversionPlan.preferredMIMEType. // So if we are here, we will definitely be trying to convert the manifest. // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. @@ -245,8 +248,8 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar } // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. 
- errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)} - for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates { + errs := []string{fmt.Sprintf("%s(%v)", ic.manifestConversionPlan.preferredMIMEType, err)} + for _, manifestMIMEType := range ic.manifestConversionPlan.otherMIMETypeCandidates { logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) ic.manifestUpdates.ManifestMIMEType = manifestMIMEType attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) @@ -380,7 +383,11 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, compressionAlgos := set.New[string]() for _, srcInfo := range ic.src.LayerInfos() { - if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil { + _, c, err := compressionEditsFromBlobInfo(srcInfo) + if err != nil { + return nil, err + } + if c != nil { compressionAlgos.Add(c.Name()) } } @@ -633,17 +640,29 @@ type diffIDResult struct { err error } -func compressionAlgorithmFromMIMEType(srcInfo types.BlobInfo) *compressiontypes.Algorithm { +// compressionEditsFromBlobInfo returns a (CompressionOperation, CompressionAlgorithm) value pair suitable +// for types.BlobInfo. +func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm, error) { // This MIME type → compression mapping belongs in manifest-specific code in our manifest // package (but we should preferably replace/change UpdatedImage instead of productizing // this workaround). switch srcInfo.MediaType { case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip: - return &compression.Gzip + return types.PreserveOriginal, &compression.Gzip, nil case imgspecv1.MediaTypeImageLayerZstd: - return &compression.Zstd + tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return types.PreserveOriginal, nil, err + } + if tocDigest != nil { + return types.PreserveOriginal, &compression.ZstdChunked, nil + } + return types.PreserveOriginal, &compression.Zstd, nil + case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer: + return types.Decompress, nil, nil + default: + return types.PreserveOriginal, nil, nil } - return nil } // copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, @@ -657,8 +676,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to // which uses the compression information to compute the updated MediaType values. // (Sadly UpdatedImage() is documented to not update MediaTypes from // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.) 
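compressionEditsFromBlobInfo above disambiguates the shared zstd media type by probing for a TOC annotation. A small standalone sketch of that probe follows; the annotation key shown is the one containers/storage writes for chunked layers, reproduced here from memory, so treat it as an assumption:

```go
package main

import (
	"fmt"

	chunkedToc "github.com/containers/storage/pkg/chunked/toc"
)

func main() {
	plain := map[string]string{} // a plain zstd layer carries no TOC annotation
	chunked := map[string]string{
		// Assumed annotation key written by the zstd:chunked compressor.
		"io.github.containers.zstd-chunked.manifest-checksum": "sha256:0123456789012345678901234567890123456789012345678901234567890123",
	}
	for _, ann := range []map[string]string{plain, chunked} {
		d, err := chunkedToc.GetTOCDigest(ann)
		switch {
		case err != nil:
			fmt.Println("error:", err)
		case d == nil:
			fmt.Println("no TOC digest: treat as plain zstd")
		default:
			fmt.Println("TOC digest found: treat as zstd:chunked:", *d)
		}
	}
}
```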
- if srcInfo.CompressionAlgorithm == nil { - srcInfo.CompressionAlgorithm = compressionAlgorithmFromMIMEType(srcInfo) + if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil { + op, algo, err := compressionEditsFromBlobInfo(srcInfo) + if err != nil { + return types.BlobInfo{}, "", err + } + srcInfo.CompressionOperation = op + srcInfo.CompressionAlgorithm = algo } ic.c.printCopyInfo("blob", srcInfo) @@ -681,26 +705,33 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v", srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression) canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType) - // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm - // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing - // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause - // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob. - // Fixing that will probably require passing more information to TryReusingBlob() than the current version of - // the ImageDestination interface lets us pass in. + var requiredCompression *compressiontypes.Algorithm - var originalCompression *compressiontypes.Algorithm if ic.requireCompressionFormatMatch { requiredCompression = ic.compressionFormat - originalCompression = srcInfo.CompressionAlgorithm } + + var tocDigest digest.Digest + + // Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest. 
+ d, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return types.BlobInfo{}, "", err + } + if d != nil { + tocDigest = *d + } + reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{ - Cache: ic.c.blobInfoCache, - CanSubstitute: canSubstitute, - EmptyLayer: emptyLayer, - LayerIndex: &layerIndex, - SrcRef: srcRef, - RequiredCompression: requiredCompression, - OriginalCompression: originalCompression, + Cache: ic.c.blobInfoCache, + CanSubstitute: canSubstitute, + EmptyLayer: emptyLayer, + LayerIndex: &layerIndex, + SrcRef: srcRef, + PossibleManifestFormats: append([]string{ic.manifestConversionPlan.preferredMIMEType}, ic.manifestConversionPlan.otherMIMETypeCandidates...), + RequiredCompression: requiredCompression, + OriginalCompression: srcInfo.CompressionAlgorithm, + TOCDigest: tocDigest, }) if err != nil { return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err) @@ -708,7 +739,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to if reused { logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest) func() { // A scope for defer - bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists") + label := "skipped: already exists" + if reusedBlob.MatchedByTOCDigest { + label = "skipped: already exists (found by TOC)" + } + bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label) defer bar.Abort(false) bar.mark100PercentComplete() }() @@ -741,7 +776,10 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to wrapped: ic.c.rawSource, bar: bar, } - uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) + uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{ + Cache: ic.c.blobInfoCache, + LayerIndex: layerIndex, + }) if err == nil { if srcInfo.Size != -1 { refill := srcInfo.Size - bar.Current() diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 222723a8f..9d9219241 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -190,7 +190,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } if info.Digest == "" { diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 6ce8f7008..a60ec563d 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -978,13 +978,10 @@ func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, t // This function can return nil reader when no url is supported by this function. In this case, the caller // should fallback to fetch the non-external blob (i.e. pull from the registry). func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { - var ( - resp *http.Response - err error - ) if len(urls) == 0 { return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") } + var remoteErrors []error for _, u := range urls { blobURL, err := url.Parse(u) if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") { @@ -993,24 +990,28 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R // NOTE: we must not authenticate on additional URLs as those // can be abused to leak credentials or tokens. Please // refer to CVE-2020-15157 for more information. - resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil) - if err == nil { - if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) - logrus.Debug(err) - resp.Body.Close() - continue - } - break + resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil) + if err != nil { + remoteErrors = append(remoteErrors, err) + continue + } + if resp.StatusCode != http.StatusOK { + err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + remoteErrors = append(remoteErrors, err) + logrus.Debug(err) + resp.Body.Close() + continue } + return resp.Body, getBlobSize(resp), nil } - if resp == nil && err == nil { + if remoteErrors == nil { return nil, 0, nil // fallback to non-external blob } - if err != nil { - return nil, 0, err + err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0]) + for _, e := range remoteErrors[1:] { + err = fmt.Errorf("%s, %w", err, e) } - return resp.Body, getBlobSize(resp), nil + return nil, 0, err } func getBlobSize(resp *http.Response) int64 { diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index a9a36f0a3..877d11b73 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -27,6 +27,7 @@ import ( "github.com/containers/image/v5/internal/uploadreader" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" 
"github.com/docker/distribution/registry/api/errcode" v2 "github.com/docker/distribution/registry/api/v2" @@ -311,6 +312,13 @@ func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info t return false, private.ReusedBlob{}, nil } +func optionalCompressionName(algo *compressiontypes.Algorithm) string { + if algo != nil { + return algo.Name() + } + return "nil" +} + // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. @@ -321,7 +329,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } - if impl.OriginalBlobMatchesRequiredCompression(options) { + if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { // First, check whether the blob happens to already exist at the destination. haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache) if err != nil { @@ -331,11 +339,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, return true, reusedInfo, nil } } else { - requiredCompression := "nil" - if options.OriginalCompression != nil { - requiredCompression = options.OriginalCompression.Name() - } - logrus.Debugf("Ignoring exact blob match case due to compression mismatch ( %s vs %s )", options.RequiredCompression.Name(), requiredCompression) + logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v", + optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats) } // Then try reusing blobs from other locations. 
@@ -355,15 +360,13 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, continue } } - if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) { - requiredCompression := "nil" - if compressionAlgorithm != nil { - requiredCompression = compressionAlgorithm.Name() - } + if !impl.CandidateMatchesTryReusingBlobOptions(options, compressionAlgorithm) { if !candidate.UnknownLocation { - logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name()) + logrus.Debugf("Ignoring candidate blob %s in %s, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), candidateRepo.Name(), + optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats) } else { - logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression) + logrus.Debugf("Ignoring candidate blob %s with no known location, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), + optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats) } continue } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index 7507d8559..b44f4ca1f 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -129,7 +129,7 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } if err := d.archive.lock(); err != nil { diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go index 429d68263..4d3858ab8 100644 --- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go @@ -36,7 +36,7 @@ type BlobInfoCache2 interface { // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). // - // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if + // If !canSubstitute, the returned candidates will match the submitted digest exactly; if // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look // up variants of the blob which have the same uncompressed digest. 
// diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go index 5d28b3e73..553569a03 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go @@ -1,25 +1,42 @@ package impl import ( + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" compression "github.com/containers/image/v5/pkg/compression/types" + "golang.org/x/exp/slices" ) -// BlobMatchesRequiredCompression validates if compression is required by the caller while selecting a blob, if it is required +// CandidateMatchesTryReusingBlobOptions validates if compression is required by the caller while selecting a blob, if it is required // then function performs a match against the compression requested by the caller and compression of existing blob // (which can be nil to represent uncompressed or unknown) -func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool { - if options.RequiredCompression == nil { - return true // no requirement imposed +func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool { + if options.RequiredCompression != nil { + if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName { + // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs. + // The caller must re-compress to build those annotations. + return false + } + if candidateCompression == nil || + (options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) { + return false + } } - if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName { - // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs. - // The caller must re-compress to build those annotations. 
- return false + + // For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”; + // and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1), + // so don’t impose any restrictions if candidateCompression == nil + if options.PossibleManifestFormats != nil && candidateCompression != nil { + if !slices.ContainsFunc(options.PossibleManifestFormats, func(mt string) bool { + return manifest.MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression) + }) { + return false + } } - return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name()) + + return true } -func OriginalBlobMatchesRequiredCompression(opts private.TryReusingBlobOptions) bool { - return BlobMatchesRequiredCompression(opts, opts.OriginalCompression) +func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool { + return CandidateMatchesTryReusingBlobOptions(opts, opts.OriginalCompression) } diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go index 0dc6bd5af..bbb53c198 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" ) @@ -39,7 +38,7 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { +func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) } diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index 17e1870c1..cdd3c5e5d 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -28,7 +28,7 @@ type wrapped struct { // // NOTE: The returned API MUST NOT be a public interface (it can be either just a struct // with public methods, or perhaps a private interface), so that we can add methods -// without breaking any external implementors of a public interface. +// without breaking any external implementers of a public interface. 
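The rewritten CandidateMatchesTryReusingBlobOptions above combines three rules: zstd:chunked is never reusable (its required annotations are not recorded), a required compression must match by name or base variant, and a candidate with known compression must fit at least one manifest format the copy may still emit. A condensed, self-contained restatement of that decision (the formatSupports parameter stands in for the internal MIMETypeSupportsCompressionAlgorithm; this is a sketch, not the vendored code):

```go
package main

import (
	"fmt"

	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
)

func candidateUsable(required, candidate *compressiontypes.Algorithm, possibleFormats []string,
	formatSupports func(mimeType string, algo compressiontypes.Algorithm) bool) bool {
	if required != nil {
		if required.Name() == compressiontypes.ZstdChunkedAlgorithmName {
			return false // annotations needed to reuse chunked blobs are not recorded
		}
		if candidate == nil ||
			(required.Name() != candidate.Name() && required.Name() != candidate.BaseVariantName()) {
			return false
		}
	}
	// candidate == nil may mean "uncompressed" or "unknown", which every known
	// format accepts, so formats are only checked when compression is known.
	if possibleFormats != nil && candidate != nil {
		for _, mt := range possibleFormats {
			if formatSupports(mt, *candidate) {
				return true
			}
		}
		return false
	}
	return true
}

func main() {
	fmt.Println(candidateUsable(nil, nil, nil, nil)) // no restrictions: usable
}
```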
func FromPublic(dest types.ImageDestination) private.ImageDestination { if dest2, ok := dest.(private.ImageDestination); ok { return dest2 diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go index 886b4e833..f0d1d042b 100644 --- a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go @@ -27,7 +27,7 @@ type wrapped struct { // // NOTE: The returned API MUST NOT be a public interface (it can be either just a struct // with public methods, or perhaps a private interface), so that we can add methods -// without breaking any external implementors of a public interface. +// without breaking any external implementers of a public interface. func FromPublic(src types.ImageSource) private.ImageSource { if src2, ok := src.(private.ImageSource); ok { return src2 diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go index 189f1a718..1d60da752 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go @@ -73,6 +73,7 @@ type ListUpdate struct { Platform *imgspecv1.Platform Annotations map[string]string CompressionAlgorithmNames []string + ArtifactType string } } @@ -101,6 +102,7 @@ type ListEdit struct { AddDigest digest.Digest AddSize int64 AddMediaType string + AddArtifactType string AddPlatform *imgspecv1.Platform AddAnnotations map[string]string AddCompressionAlgorithms []compression.Algorithm diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go index 1dbcc1418..c77db7522 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go @@ -3,6 +3,7 @@ package manifest import ( "encoding/json" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/libtrust" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -14,7 +15,7 @@ import ( const ( // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature + // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" @@ -165,3 +166,29 @@ func NormalizedMIMEType(input string) string { return DockerV2Schema1SignedMediaType } } + +// CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values. +func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool { + // Compare the discussion about BaseVariantName in MIMETypeSupportsCompressionAlgorithm(). 
+ switch algo.Name() { + case compressiontypes.GzipAlgorithmName: + return true + default: + return false + } +} + +// MIMETypeSupportsCompressionAlgorithm returns true if mimeType can represent algo. +func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes.Algorithm) bool { + if CompressionAlgorithmIsUniversallySupported(algo) { + return true + } + // This does not use BaseVariantName: Plausibly a manifest format might support zstd but not have annotation fields. + // The logic might have to be more complex (and more ad-hoc) if more manifest formats, with more capabilities, emerge. + switch algo.Name() { + case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName: + return mimeType == imgspecv1.MediaTypeImageManifest + default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere + return false + } +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index d8d06513b..829852a83 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -61,6 +61,7 @@ func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate ret.ReadOnly.Platform = manifest.Platform ret.ReadOnly.Annotations = manifest.Annotations ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations) + ret.ReadOnly.ArtifactType = manifest.ArtifactType return ret, nil } } @@ -102,7 +103,7 @@ func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, an *annotationsMap = map[string]string{} } for _, algo := range compressionAlgorithms { - switch algo.Name() { + switch algo.BaseVariantName() { case compression.ZstdAlgorithmName: (*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue default: @@ -157,11 +158,13 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { } addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations) addedEntries = append(addedEntries, imgspecv1.Descriptor{ - MediaType: editInstance.AddMediaType, - Size: editInstance.AddSize, - Digest: editInstance.AddDigest, - Platform: editInstance.AddPlatform, - Annotations: annotations}) + MediaType: editInstance.AddMediaType, + ArtifactType: editInstance.AddArtifactType, + Size: editInstance.AddSize, + Digest: editInstance.AddDigest, + Platform: editInstance.AddPlatform, + Annotations: annotations, + }) default: return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) } @@ -299,12 +302,13 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation platform = &platformCopy } m := imgspecv1.Descriptor{ - MediaType: component.MediaType, - Size: component.Size, - Digest: component.Digest, - URLs: slices.Clone(component.URLs), - Annotations: maps.Clone(component.Annotations), - Platform: platform, + MediaType: component.MediaType, + ArtifactType: component.ArtifactType, + Size: component.Size, + Digest: component.Digest, + URLs: slices.Clone(component.URLs), + Annotations: maps.Clone(component.Annotations), + Platform: platform, } index.Manifests[i] = m } diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index 3ba0e4084..94002d6d4 
100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -25,6 +25,7 @@ import ( "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" "golang.org/x/exp/slices" ) @@ -82,15 +83,40 @@ func getCPUVariantWindows(arch string) string { func getCPUVariantArm() string { variant, err := getCPUInfo("Cpu architecture") if err != nil { + logrus.Errorf("Couldn't get cpu architecture: %v", err) return "" } - // TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286) switch strings.ToLower(variant) { case "8", "aarch64": variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + case "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": variant = "v7" + case "7": + // handle RPi Zero variant mismatch due to wrong variant from kernel + // https://github.com/containerd/containerd/pull/4530 + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + // https://github.com/moby/moby/pull/36121#issuecomment-398328286 + model, err := getCPUInfo("model name") + if err != nil { + logrus.Errorf("Couldn't get cpu model name, it may be the corner case where variant is 6: %v", err) + return "" + } + // model name is NOT a value provided by the CPU; it is another outcome of Linux CPU detection, + // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L178C35-L178C35 + // (matching happens based on value + mask at https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L273-L274 ) + // ARM CPU ID starts with a “main” ID register https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/System-Control-Registers-in-a-VMSA-implementation/VMSA-System-control-registers-descriptions--in-register-order/MIDR--Main-ID-Register--VMSA?lang=en , + // but the ARMv6/ARMv7 differences are not a single dimension, https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-CPUID-Identification-Scheme?lang=en . + // The Linux "cpu architecture" is determined by a “memory model” feature. + // + // So, the "armv6-compatible" check basically checks for a "v6 or v7 CPU, but not one found listed as a known v7 one in the .proc.info.init tables of + // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v7.S . + if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + logrus.Debugf("Detected corner case, setting cpu variant to v6") + variant = "v6" + } else { + variant = "v7" + } case "6", "6tej": variant = "v6" case "5", "5t", "5te", "5tej": diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index 95d561fcd..562adbea8 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -55,7 +55,7 @@ type ImageDestinationInternalOnly interface { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. 
- PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error) + PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error) // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). @@ -100,6 +100,12 @@ type PutBlobOptions struct { LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. } +// PutBlobPartialOptions are used in PutBlobPartial. +type PutBlobPartialOptions struct { + Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. + LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs) +} + // TryReusingBlobOptions are used in TryReusingBlobWithOptions. type TryReusingBlobOptions struct { Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. @@ -112,11 +118,13 @@ type TryReusingBlobOptions struct { // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers // if they use internal/imagedestination/impl.Compat; // in that case, they will all be consistently zero-valued. - RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go - OriginalCompression *compression.Algorithm // Must be set if RequiredCompression is set; can be set to nil to indicate “uncompressed” or “unknown”. - EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. - LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. - SrcRef reference.Named // A reference to the source image that contains the input blob. + EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. + LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. + SrcRef reference.Named // A reference to the source image that contains the input blob. + PossibleManifestFormats []string // A set of possible manifest formats; at least one should support the reused layer blob. + RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go + OriginalCompression *compression.Algorithm // May be nil to indicate “uncompressed” or “unknown”. + TOCDigest digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest. } // ReusedBlob is information about a blob reused in a destination. @@ -128,6 +136,8 @@ type ReusedBlob struct { // a differently-compressed blob. CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A + + MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes. } // ImageSourceChunk is a portion of a blob. 
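Taken together, the reworked TryReusingBlobOptions above tighten the reuse contract: a candidate blob must match any RequiredCompression, and, when its compression is known, at least one of the PossibleManifestFormats must be able to represent that compression. The following stand-alone Go sketch condenses that gating logic under those assumptions; candidateOK and its parameters are illustrative stand-ins, not part of the c/image API.

package main

import "fmt"

// algorithm is a minimal stand-in for compression.Algorithm.
type algorithm struct{ name string }

func (a *algorithm) Name() string { return a.name }

// candidateOK is a hypothetical condensation of the reuse gating: candidate == nil
// means “uncompressed or unknown”, which imposes no format restriction.
func candidateOK(required, candidate *algorithm, possibleFormats []string,
	formatSupports func(mimeType, algoName string) bool) bool {
	if required != nil && (candidate == nil || required.Name() != candidate.Name()) {
		return false // a required compression must match exactly by name
	}
	if possibleFormats != nil && candidate != nil {
		supported := false
		for _, mt := range possibleFormats {
			if formatSupports(mt, candidate.Name()) {
				supported = true
				break
			}
		}
		if !supported {
			return false // no acceptable manifest format can represent this compression
		}
	}
	return true
}

func main() {
	// Assume, as in MIMETypeSupportsCompressionAlgorithm, that gzip is universal
	// and zstd is representable only in OCI image manifests.
	supports := func(mt, algo string) bool {
		return algo == "gzip" || mt == "application/vnd.oci.image.manifest.v1+json"
	}
	fmt.Println(candidateOK(nil, &algorithm{"zstd"},
		[]string{"application/vnd.oci.image.manifest.v1+json"}, supports)) // true
	fmt.Println(candidateOK(nil, &algorithm{"zstd"},
		[]string{"application/vnd.docker.distribution.manifest.v2+json"}, supports)) // false
}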
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 1bdcf3d30..de4628115 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -55,7 +55,7 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType if variants != nil { name := mtsUncompressed if algorithm != nil { - name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark() + name = algorithm.BaseVariantName() } if res, ok := variants[name]; ok { if res != mtsUnsupportedMIMEType { diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index a80af701a..762815570 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -10,6 +10,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/set" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/regexp" "github.com/docker/docker/api/types/versions" @@ -142,6 +143,15 @@ func (m *Schema1) LayerInfos() []LayerInfo { return layers } +const fakeSchema1MIMEType = DockerV2Schema2LayerMediaType // Used only in schema1CompressionMIMETypeSets +var schema1CompressionMIMETypeSets = []compressionMIMETypeSet{ + { + mtsUncompressed: fakeSchema1MIMEType, + compressiontypes.GzipAlgorithmName: fakeSchema1MIMEType, + compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType, + }, +} + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. @@ -150,6 +160,11 @@ func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { } m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) for i, info := range layerInfos { + // There are no MIME types in schema1, but we do a “conversion” here to reject unsupported compression algorithms, + // in a way that is consistent with the other schema implementations. + if _, err := updatedMIMEType(schema1CompressionMIMETypeSets, fakeSchema1MIMEType, info); err != nil { + return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err) + } // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. // So, we don't bother recomputing the IDs in m.History.V1Compatibility. 
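The fakeSchema1MIMEType table added above exists only because schema1 has no per-layer MIME types: a placeholder MIME type lets UpdateLayerInfos reuse the shared compression-variant lookup and reject algorithms schema1 cannot carry. A rough stand-alone sketch of the effect, with illustrative placeholder values rather than the real c/image constants:

package main

import "fmt"

const (
	fakeMIME    = "placeholder/schema1" // stands in for fakeSchema1MIMEType
	unsupported = ""                    // stands in for mtsUnsupportedMIMEType
)

// schema1Variants mirrors schema1CompressionMIMETypeSets: uncompressed and gzip
// map back to the placeholder (i.e. “fine, nothing to update”), zstd is rejected.
var schema1Variants = map[string]string{
	"":     fakeMIME,
	"gzip": fakeMIME,
	"zstd": unsupported,
}

// checkSchema1Compression is a hypothetical condensation of the updatedMIMEType
// call in UpdateLayerInfos above.
func checkSchema1Compression(baseVariant string) error {
	mt, ok := schema1Variants[baseVariant]
	if !ok || mt == unsupported {
		return fmt.Errorf("compression variant %q is not supported by schema1", baseVariant)
	}
	return nil
}

func main() {
	fmt.Println(checkSchema1Compression("gzip")) // <nil>
	fmt.Println(checkSchema1Compression("zstd")) // rejected with an error
}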
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 959aac935..828b8da0b 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -16,7 +16,7 @@ import ( const ( // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature + // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index a85641c36..548994ffa 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -235,7 +235,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type } // ImageID computes an ID which can uniquely identify this image by its contents. -func (m *OCI1) ImageID([]digest.Digest) (string, error) { +func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) { // The way m.Config.Digest “uniquely identifies” an image is // by containing RootFS.DiffIDs, which identify the layers of the image. // For non-image artifacts, we can’t expect the config to change diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index 8386c47a3..a3eb5d7a1 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -6,13 +6,13 @@ import ( "io" "os" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) @@ -119,8 +119,8 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions.
-func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { - return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { + return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination @@ -169,10 +169,15 @@ func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedTopleve // tar converts the directory at src and saves it to dst func tarDirectory(src, dst string) error { // input is a stream of bytes from the archive of the directory at path - input, err := archive.Tar(src, archive.Uncompressed) + input, err := archive.TarWithOptions(src, &archive.TarOptions{ + Compression: archive.Uncompressed, + // Don’t include the data about the user account this code is running under. + ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, + }) if err != nil { return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err) } + defer input.Close() // creates the tar file outFile, err := os.Create(dst) diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 100d16763..305d8c9c7 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -173,7 +173,7 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } if info.Digest == "" { diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 50a5339e1..656f4518d 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -12,7 +12,6 @@ import ( "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" @@ -128,8 +127,8 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. 
-func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { - return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { + return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index d00a0cdf8..228af90ca 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -335,7 +335,7 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } if d.repo == nil { diff --git a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go index 9bda08515..663df11dd 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go +++ b/vendor/github.com/containers/image/v5/pkg/blobcache/dest.go @@ -9,7 +9,6 @@ import ( "path/filepath" "sync" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/private" @@ -227,8 +226,8 @@ func (d *blobCacheDestination) SupportsPutBlobPartial() bool { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { - return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) +func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { + return d.destination.PutBlobPartial(ctx, chunkAccessor, srcInfo, options) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination @@ -237,7 +236,7 @@ func (d *blobCacheDestination) PutBlobPartial(ctx context.Context, chunkAccessor // If the blob has been successfully reused, returns (true, info, nil). 
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (d *blobCacheDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } present, reusedInfo, err := d.destination.TryReusingBlobWithOptions(ctx, info, options) diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 97562687c..470fca0c1 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -91,11 +91,11 @@ func min(a, b int) int { // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the // number of entries to limit for known and unknown location separately, only to make testing simpler. -// TODO: following function is not destructive any more in the nature instead priortized result is actually copies of the original +// TODO: the following function is no longer destructive; instead, the prioritized result is actually a copy of the original // candidate set, so in the future we might want to rename this public API and remove the destructive prefix. func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 { // split unknown candidates and known candidates - // and limit them seperately. + // and limit them separately. var knownLocationCandidates []CandidateWithTime var unknownLocationCandidates []CandidateWithTime // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index cfad16b2e..16193db95 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -184,7 +184,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused // within the specified (transport scope) (if they still exist, which is not guaranteed). // -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest.
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go index 2b446a61c..d8bde2fa0 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go @@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) { // dbTransaction calls fn within a read-write transaction in db. func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) { - // Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive dicussion. + // Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion. var zeroRes T // A zero value of T @@ -496,7 +496,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). // -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look // up variants of the blob which have the same uncompressed digest. // diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go index 4443dda7f..b83a257e4 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -19,19 +19,19 @@ type Algorithm = types.Algorithm var ( // Gzip compression. - Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName, + Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, "", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor) // Bzip2 compression. - Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName, + Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, "", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor) // Xz compression. - Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName, + Xz = internal.NewAlgorithm(types.XzAlgorithmName, "", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) // Zstd compression. - Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName, + Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, "", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) - // ZstdChunked is a Zstd compression with chunk metadta which allows random access to individual files. - ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */ + // ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files. 
+ ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, nil, ZstdDecompressor, compressor.ZstdCompressor) compressionAlgorithms = map[string]Algorithm{ diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go index ba619be00..d6f85274d 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go @@ -12,23 +12,28 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error) // Algorithm is a compression algorithm that can be used for CompressStream. type Algorithm struct { - name string - mime string - prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection. - decompressor DecompressorFunc - compressor CompressorFunc + name string + baseVariantName string + prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection. + decompressor DecompressorFunc + compressor CompressorFunc } // NewAlgorithm creates an Algorithm instance. +// nontrivialBaseVariantName is typically "". // This function exists so that Algorithm instances can only be created by code that // is allowed to import this internal subpackage. -func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { +func NewAlgorithm(name, nontrivialBaseVariantName string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { + baseVariantName := name + if nontrivialBaseVariantName != "" { + baseVariantName = nontrivialBaseVariantName + } return Algorithm{ - name: name, - mime: mime, - prefix: prefix, - decompressor: decompressor, - compressor: compressor, + name: name, + baseVariantName: baseVariantName, + prefix: prefix, + decompressor: decompressor, + compressor: compressor, } } @@ -37,10 +42,11 @@ func (c Algorithm) Name() string { return c.name } -// InternalUnstableUndocumentedMIMEQuestionMark ??? -// DO NOT USE THIS anywhere outside of c/image until it is properly documented. -func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string { - return c.mime +// BaseVariantName returns the name of the “base variant” of the compression algorithm. +// It is either equal to Name() of the same algorithm, or equal to Name() of some other Algorithm (the “base variant”). +// This supports a single level of “is-a” relationship between compression algorithms, e.g. where "zstd:chunked" data is valid "zstd" data. +func (c Algorithm) BaseVariantName() string { + return c.baseVariantName } // AlgorithmCompressor returns the compressor field of algo. 
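The BaseVariantName change above replaces the old MIME-oriented field with an explicit, single-level “is-a” relationship between compression algorithms. A small illustration of the intended semantics follows; baseVariant simply mirrors the defaulting done in NewAlgorithm and is not itself part of the API.

package main

import "fmt"

// baseVariant mirrors NewAlgorithm: an algorithm created with an empty
// nontrivialBaseVariantName is its own base variant.
func baseVariant(name, nontrivialBaseVariantName string) string {
	if nontrivialBaseVariantName != "" {
		return nontrivialBaseVariantName
	}
	return name
}

func main() {
	fmt.Println(baseVariant("gzip", ""))             // "gzip": its own base variant
	fmt.Println(baseVariant("zstd", ""))             // "zstd"
	fmt.Println(baseVariant("zstd:chunked", "zstd")) // "zstd": zstd:chunked data is valid zstd data
}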
diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go index ef5d3df6f..c11fa46a9 100644 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go @@ -1,3 +1,6 @@ +//go:build !containers_image_fulcio_stub +// +build !containers_image_fulcio_stub + package signature import ( diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go new file mode 100644 index 000000000..c0b48dafa --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go @@ -0,0 +1,28 @@ +//go:build containers_image_fulcio_stub +// +build containers_image_fulcio_stub + +package signature + +import ( + "crypto" + "crypto/ecdsa" + "crypto/x509" + "errors" +) + +type fulcioTrustRoot struct { + caCertificates *x509.CertPool + oidcIssuer string + subjectEmail string +} + +func (f *fulcioTrustRoot) validate() error { + return errors.New("fulcio disabled at compile-time") +} + +func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, + untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, + untrustedPayloadBytes []byte) (crypto.PublicKey, error) { + return nil, errors.New("fulcio disabled at compile-time") + +} diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go index d439b5f7a..50243da33 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -1,3 +1,6 @@ +//go:build !containers_image_rekor_stub +// +build !containers_image_rekor_stub + package internal import ( @@ -216,6 +219,10 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver if hashedRekordV001.Data.Hash.Algorithm == nil { return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`) } + // FIXME: Rekor 1.3.5 has added SHA-384 and SHA-512 as recognized values. + // Eventually we should support them as well; doing that cleanly would require updating to Rekor 1.3.5, which requires Go 1.21. + // Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility + // issue. if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 { return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm)) } diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go new file mode 100644 index 000000000..7c121cc2e --- /dev/null +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go @@ -0,0 +1,15 @@ +//go:build containers_image_rekor_stub +// +build containers_image_rekor_stub + +package internal + +import ( + "crypto/ecdsa" + "time" +) + +// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data. +// Returns bundle upload time on success.
+func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { + return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time") +} diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 07e1d5e1f..c06d6c09f 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -16,7 +16,6 @@ import ( "sync/atomic" "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" "github.com/containers/image/v5/internal/private" @@ -55,41 +54,61 @@ type storageImageDestination struct { stubs.ImplementsPutBlobPartial stubs.AlwaysSupportsSignatures - imageRef storageReference - directory string // Temporary directory where we store blobs until Commit() time - nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - manifestDigest digest.Digest // Valid if len(manifest) != 0 - signatures []byte // Signature contents, temporary - signatureses map[digest.Digest][]byte // Instance signature contents, temporary - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice - SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice - - // A storage destination may be used concurrently. Accesses are - // serialized via a mutex. Please refer to the individual comments - // below for details. - lock sync.Mutex + imageRef storageReference + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + manifestDigest digest.Digest // Valid if len(manifest) != 0 + untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs, valid if not nil + signatures []byte // Signature contents, temporary + signatureses map[digest.Digest][]byte // Instance signature contents, temporary + metadata storageImageMetadata // Metadata contents being built + // Mapping from layer (by index) to the associated ID in the storage. // It's protected *implicitly* since `commitLayer()`, at any given // time, can only be executed by *one* goroutine. Please refer to // `queueOrCommit()` for further details on how the single-caller // guarantee is implemented. - indexToStorageID map[int]*string - // All accesses to below data are protected by `lock` which is made - // *explicit* in the code. 
- blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes - filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them - currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) - indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image - blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer - diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output + indexToStorageID map[int]string + + // A storage destination may be used concurrently, due to HasThreadSafePutBlob. + lock sync.Mutex // Protects lockProtected + lockProtected storageImageDestinationLockProtected +} + +// storageImageDestinationLockProtected contains storageImageDestination data which might be +// accessed concurrently, due to HasThreadSafePutBlob. +// _During the concurrent TryReusingBlob/PutBlob/* calls_ (but not necessarily during the final Commit) +// users must hold storageImageDestination.lock. +type storageImageDestinationLockProtected struct { + currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) + indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image + + // In general, a layer is identified either by (compressed) digest, or by TOC digest. + // When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values + // we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not + // recompute those values for incoming layers — the point of the reuse is that we don’t need to consume the incoming layer.) + + // Layer identification: For a layer, at least one of indexToTOCDigest and blobDiffIDs must be available before commitLayer is called. + // The presence of an indexToTOCDigest is what decides how the layer is identified, i.e. which fields must be trusted. + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest + + // Layer data: Before commitLayer is called, either at least one of (diffOutputs, blobAdditionalLayer, filenames) + // should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer. + // They are looked up in the order they are mentioned above. + diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer’s intermediate data + blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer + // Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set. + filenames map[digest.Digest]string + // Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set. + fileSizes map[digest.Digest]int64 } // addedLayerInfo records data about a layer to use in this image.
type addedLayerInfo struct { - digest digest.Digest - emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. + digest digest.Digest // Mandatory, the digest of the layer. + emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. } // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until @@ -117,18 +136,23 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (* HasThreadSafePutBlob: true, }), - imageRef: imageRef, - directory: directory, - signatureses: make(map[digest.Digest][]byte), - blobDiffIDs: make(map[digest.Digest]digest.Digest), - blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), - fileSizes: make(map[digest.Digest]int64), - filenames: make(map[digest.Digest]string), - SignatureSizes: []int{}, - SignaturesSizes: make(map[digest.Digest][]int), - indexToStorageID: make(map[int]*string), - indexToAddedLayerInfo: make(map[int]addedLayerInfo), - diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), + imageRef: imageRef, + directory: directory, + signatureses: make(map[digest.Digest][]byte), + metadata: storageImageMetadata{ + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + }, + indexToStorageID: make(map[int]string), + lockProtected: storageImageDestinationLockProtected{ + indexToAddedLayerInfo: make(map[int]addedLayerInfo), + blobDiffIDs: make(map[digest.Digest]digest.Digest), + indexToTOCDigest: make(map[int]digest.Digest), + diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput), + blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), + filenames: make(map[digest.Digest]string), + fileSizes: make(map[digest.Digest]int64), + }, } dest.Compat = impl.AddCompat(dest) return dest, nil @@ -142,12 +166,13 @@ func (s *storageImageDestination) Reference() types.ImageReference { // Close cleans up the temporary directory and additional layer store handlers. func (s *storageImageDestination) Close() error { - for _, al := range s.blobAdditionalLayer { + // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + for _, al := range s.lockProtected.blobAdditionalLayer { al.Release() } - for _, v := range s.diffOutputs { + for _, v := range s.lockProtected.diffOutputs { if v.Target != "" { - _ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target) + _ = s.imageRef.transport.store.CleanupStagedLayer(v) } } return os.RemoveAll(s.directory) @@ -227,9 +252,9 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf // Record information about the blob. s.lock.Lock() - s.blobDiffIDs[blobDigest] = diffID.Digest() - s.fileSizes[blobDigest] = counter.Count - s.filenames[blobDigest] = filename + s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest() + s.lockProtected.fileSizes[blobDigest] = counter.Count + s.lockProtected.filenames[blobDigest] = filename s.lock.Unlock() // This is safe because we have just computed diffID, and blobDigest was either computed // by us, or validated by the caller (usually copy.digestingReader). @@ -269,14 +294,14 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read // It is available only if SupportsPutBlobPartial(). 
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) { fetcher := zstdFetcher{ chunkAccessor: chunkAccessor, ctx: ctx, blobInfo: srcInfo, } - differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher) + differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher) if err != nil { return private.UploadedBlob{}, err } @@ -286,13 +311,25 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces return private.UploadedBlob{}, err } + if out.TOCDigest == "" && out.UncompressedDigest == "" { + return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set") + } + blobDigest := srcInfo.Digest s.lock.Lock() - s.blobDiffIDs[blobDigest] = blobDigest - s.fileSizes[blobDigest] = 0 - s.filenames[blobDigest] = "" - s.diffOutputs[blobDigest] = out + if out.UncompressedDigest != "" { + // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is + // responsible for ensuring blobDigest has been validated. + s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest + } else { + // Don’t identify layers by TOC if UncompressedDigest is available. + // - Using UncompressedDigest allows image reuse with non-partially-pulled layers + // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. + // That TOC is quite unlikely to match with any other TOC value. + s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest + } + s.lockProtected.diffOutputs[options.LayerIndex] = out s.lock.Unlock() return private.UploadedBlob{ @@ -307,7 +344,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if !impl.OriginalBlobMatchesRequiredCompression(options) { + if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { return false, private.ReusedBlob{}, nil } reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options) @@ -321,68 +358,79 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, }) } -// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata. +// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (blobDigest, size or -1), filling s.blobDiffIDs and other metadata. // The caller must arrange the blob to be eventually committed using s.commitLayer(). 
-func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { +func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { // lock the entire method as it executes fairly quickly s.lock.Lock() defer s.lock.Unlock() if options.SrcRef != nil { // Check if we have the layer in the underlying additional layer store. - aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String()) + aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobDigest, options.SrcRef.String()) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err) } else if err == nil { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[digest] = aLayer.UncompressedDigest() - s.blobAdditionalLayer[digest] = aLayer + s.lockProtected.blobDiffIDs[blobDigest] = aLayer.UncompressedDigest() + s.lockProtected.blobAdditionalLayer[blobDigest] = aLayer return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: aLayer.CompressedSize(), }, nil } } - if digest == "" { + if blobDigest == "" { return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`) } - if err := digest.Validate(); err != nil { + if err := blobDigest.Validate(); err != nil { return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) } + if options.TOCDigest != "" { + if err := options.TOCDigest.Validate(); err != nil { + return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) + } + } + + // Check if we have a wasn't-compressed layer in storage that's based on that blob. // Check if we've already cached it in a file. - if size, ok := s.fileSizes[digest]; ok { + if size, ok := s.lockProtected.fileSizes[blobDigest]; ok { + // s.lockProtected.blobDiffIDs is set either by putBlobToPendingFile or in createNewLayer when creating the + // filenames/fileSizes entry. return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: size, }, nil } - // Check if we have a wasn't-compressed layer in storage that's based on that blob. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest) + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobDigest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobDigest, err) } if len(layers) > 0 { - // Save this for completeness. - s.blobDiffIDs[digest] = layers[0].UncompressedDigest + s.lockProtected.blobDiffIDs[blobDigest] = blobDigest return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: layers[0].UncompressedSize, }, nil } // Check if we have a was-compressed layer in storage that's based on that blob. 
- layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest) + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobDigest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobDigest, err) } if len(layers) > 0 { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[digest] = layers[0].UncompressedDigest + // LayersByCompressedDigest only finds layers which were created from a full layer blob, and extracting that + // always sets UncompressedDigest. + diffID := layers[0].UncompressedDigest + if diffID == "" { + return false, private.ReusedBlob{}, fmt.Errorf("internal error: compressed layer %q (for compressed digest %q) does not have an uncompressed digest", layers[0].ID, blobDigest.String()) + } + s.lockProtected.blobDiffIDs[blobDigest] = diffID return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: layers[0].CompressedSize, }, nil } @@ -391,23 +439,23 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. if options.CanSubstitute || size != -1 { - if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest { + if uncompressedDigest := options.Cache.UncompressedDigest(blobDigest); uncompressedDigest != "" && uncompressedDigest != blobDigest { layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) } if len(layers) > 0 { if size != -1 { - s.blobDiffIDs[digest] = layers[0].UncompressedDigest + s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest return true, private.ReusedBlob{ - Digest: digest, + Digest: blobDigest, Size: size, }, nil } if !options.CanSubstitute { - return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest) + return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", blobDigest) } - s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest + s.lockProtected.blobDiffIDs[uncompressedDigest] = uncompressedDigest return true, private.ReusedBlob{ Digest: uncompressedDigest, Size: layers[0].UncompressedSize, @@ -416,6 +464,32 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, } } + if options.TOCDigest != "" && options.LayerIndex != nil { + // Check if we have a chunked layer in storage with the same TOC digest. 
+ layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest) + + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err) + } + if len(layers) > 0 { + if size != -1 { + s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest + return true, private.ReusedBlob{ + Digest: blobDigest, + Size: size, + MatchedByTOCDigest: true, + }, nil + } else if options.CanSubstitute && layers[0].UncompressedDigest != "" { + s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest + return true, private.ReusedBlob{ + Digest: layers[0].UncompressedDigest, + Size: layers[0].UncompressedSize, + MatchedByTOCDigest: true, + }, nil + } + } + } + // Nope, we don't have it. return false, private.ReusedBlob{}, nil } @@ -425,6 +499,8 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, // that since we don't have a recommendation, a random ID should be used if one needs // to be allocated. func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + // Build the diffID list. We need the decompressed sums that we've been calculating to // fill in the DiffIDs. It's expected (but not enforced by us) that the number of // diffIDs corresponds to the number of non-EmptyLayer entries in the history. @@ -438,24 +514,59 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string { continue } blobSum := m.FSLayers[i].BlobSum - diffID, ok := s.blobDiffIDs[blobSum] + diffID, ok := s.lockProtected.blobDiffIDs[blobSum] if !ok { + // this can, in principle, legitimately happen when a layer is reused by TOC. logrus.Infof("error looking up diffID for layer %q", blobSum.String()) return "" } diffIDs = append([]digest.Digest{diffID}, diffIDs...) } case *manifest.Schema2, *manifest.OCI1: - // We know the ID calculation for these formats doesn't actually use the diffIDs, - // so we don't need to populate the diffID list. + // We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate + // the diffID list. default: return "" } - id, err := m.ImageID(diffIDs) + + // We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption. + // + // Traditionally that means the same ~config digest, as computed by m.ImageID; + // but if we pull a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest, + // nor against the config’s RootFS.DiffIDs. We don’t really want to do either, to allow partial layer pulls where we never see + // most of the data. + // + // So, if a layer is pulled by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC, + // must enter into the image ID computation. + // But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades, + // and to introduce the changed behavior only when partial pulls are used. + // + // Note that it’s not 100% guaranteed that an image pulled by TOC uses an OCI manifest; consider + // (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above. 
+	ordinaryImageID, err := m.ImageID(diffIDs)
 	if err != nil {
 		return ""
 	}
-	return id
+	tocIDInput := ""
+	hasLayerPulledByTOC := false
+	for i := range m.LayerInfos() {
+		layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case.
+		tocDigest, ok := s.lockProtected.indexToTOCDigest[i] // "" if not a TOC
+		if ok {
+			hasLayerPulledByTOC = true
+			layerValue = tocDigest.String()
+		}
+		tocIDInput += layerValue + "|" // "|" cannot be present in a TOC digest, so this is an unambiguous separator.
+	}
+
+	if !hasLayerPulledByTOC {
+		return ordinaryImageID
+	}
+	// ordinaryImageID is a digest of a config, which is a JSON value.
+	// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
+	tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex()
+	logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
+	return tocImageID
 }
 
 // getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
@@ -468,7 +579,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
 		return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err)
 	}
 	// Assume it's a file, since we're only calling this from a place that expects to read files.
-	if filename, ok := s.filenames[info.Digest]; ok {
+	if filename, ok := s.lockProtected.filenames[info.Digest]; ok {
 		contents, err2 := os.ReadFile(filename)
 		if err2 != nil {
 			return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2)
 		}
@@ -502,23 +613,23 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
 	// caller is the "worker" routine committing layers. All other routines
 	// can continue pulling and queuing in layers.
 	s.lock.Lock()
-	s.indexToAddedLayerInfo[index] = info
+	s.lockProtected.indexToAddedLayerInfo[index] = info
 
 	// We're still waiting for at least one previous/parent layer to be
 	// committed, so there's nothing to do.
-	if index != s.currentIndex {
+	if index != s.lockProtected.currentIndex {
 		s.lock.Unlock()
 		return nil
 	}
 
 	for {
-		info, ok := s.indexToAddedLayerInfo[index]
+		info, ok := s.lockProtected.indexToAddedLayerInfo[index]
 		if !ok {
 			break
 		}
 		s.lock.Unlock()
 		// Note: commitLayer locks on-demand.
-		if err := s.commitLayer(index, info, -1); err != nil {
+		if stopQueue, err := s.commitLayer(index, info, -1); stopQueue || err != nil {
 			return err
 		}
 		s.lock.Lock()
 	}
@@ -527,185 +638,337 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
 	// Set the index at the very end to make sure that only one routine
 	// enters stage 2).
-	s.currentIndex = index
+	s.lockProtected.currentIndex = index
 	s.lock.Unlock()
 	return nil
 }
 
+// singleLayerIDComponent returns a single layer’s input to computing a layer (chain) ID,
+// and an indication whether the input already has the shape of a layer ID.
+// It returns ("", false) if the layer is not found at all (which should never happen).
+func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
+		return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
+	}
+
+	if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
+		return d.Hex(), true // This looks like chain IDs, and it uses the traditional value.
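+		// (Editorial note: commitLayer below chains this component with the parent layer ID,
+		//	id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex()
+		// so the same diff applied on top of different parents yields different layer IDs.)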
+ } + return "", false +} + // commitLayer commits the specified layer with the given index to the storage. // size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs. // +// If the layer cannot be committed yet, the function returns (true, nil). +// // Note that the previous layer is expected to already be committed. // // Caution: this function must be called without holding `s.lock`. Callers // must guarantee that, at any given time, at most one goroutine may execute // `commitLayer()`. -func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error { +func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) { // Already committed? Return early. if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted { - return nil + return false, nil } // Start with an empty string or the previous layer ID. Note that // `s.indexToStorageID` can only be accessed by *one* goroutine at any // given time. Hence, we don't need to lock accesses. - var lastLayer string - if prev := s.indexToStorageID[index-1]; prev != nil { - lastLayer = *prev + var parentLayer string + if index != 0 { + prev, ok := s.indexToStorageID[index-1] + if !ok { + return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1) + } + parentLayer = prev } // Carry over the previous ID for empty non-base layers. if info.emptyLayer { - s.indexToStorageID[index] = &lastLayer - return nil + s.indexToStorageID[index] = parentLayer + return false, nil } // Check if there's already a layer with the ID that we'd give to the result of applying // this layer blob to its parent, if it has one, or the blob's hex value otherwise. - s.lock.Lock() - diffID, haveDiffID := s.blobDiffIDs[info.digest] - s.lock.Unlock() - if !haveDiffID { - // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), - // or to even check if we had it. - // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller + // The layerID refers either to the DiffID or the digest of the TOC. + layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest) + if layerIDComponent == "" { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / … + // + // Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller // that relies on using a blob digest that has never been seen by the store had better call // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only // so far we are going to accommodate that (if we should be doing that at all). - logrus.Debugf("looking for diffID for blob %+v", info.digest) + // + // We are also ignoring lookups by TOC, and other non-trivial situations. + // Those can only happen using the c/image/internal/private API, + // so those internal callers should be fixed to follow the API instead of expanding this fallback. + logrus.Debugf("looking for diffID for blob=%+v", info.digest) + // Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit. 
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{ Cache: none.NoCache, CanSubstitute: false, }) if err != nil { - return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err) + return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err) } if !has { - return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String()) + return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String()) } - diffID, haveDiffID = s.blobDiffIDs[info.digest] - if !haveDiffID { - return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String()) + + layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest) + if layerIDComponent == "" { + return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String()) } } - id := diffID.Hex() - if lastLayer != "" { - id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + + id := layerIDComponent + if !layerIDComponentStandalone || parentLayer != "" { + id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex() } if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { // There's already a layer that should have the right contents, just reuse it. - lastLayer = layer.ID - s.indexToStorageID[index] = &lastLayer - return nil + s.indexToStorageID[index] = layer.ID + return false, nil } + layer, err := s.createNewLayer(index, info.digest, parentLayer, id) + if err != nil { + return false, err + } + if layer == nil { + return true, nil + } + s.indexToStorageID[index] = layer.ID + return false, nil +} + +// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be ""). +// If the layer cannot be committed yet, the function returns (nil, nil). +func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) { s.lock.Lock() - diffOutput, ok := s.diffOutputs[info.digest] + diffOutput, ok := s.lockProtected.diffOutputs[index] s.lock.Unlock() if ok { - layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) - if err != nil { - return err + var untrustedUncompressedDigest digest.Digest + if diffOutput.UncompressedDigest == "" { + d, err := s.untrustedLayerDiffID(index) + if err != nil { + return nil, err + } + if d == "" { + logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID) + return nil, nil + } + untrustedUncompressedDigest = d } - // FIXME: what to do with the uncompressed digest? 
-		diffOutput.UncompressedDigest = info.digest
-
-		if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
-			_ = s.imageRef.transport.store.Delete(layer.ID)
-			return err
+		flags := make(map[string]interface{})
+		if untrustedUncompressedDigest != "" {
+			flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest
+			logrus.Debugf("Setting uncompressed digest to %q for layer %q", untrustedUncompressedDigest, newLayerID)
 		}
-		s.indexToStorageID[index] = &layer.ID
-		return nil
+		args := storage.ApplyStagedLayerOptions{
+			ID:          newLayerID,
+			ParentLayer: parentLayer,
+
+			DiffOutput: diffOutput,
+			DiffOptions: &graphdriver.ApplyDiffWithDifferOpts{
+				Flags: flags,
+			},
+		}
+		layer, err := s.imageRef.transport.store.ApplyStagedLayer(args)
+		if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+			return nil, fmt.Errorf("failed to put layer using a partial pull: %w", err)
+		}
+		return layer, nil
 	}
 
 	s.lock.Lock()
-	al, ok := s.blobAdditionalLayer[info.digest]
+	al, ok := s.lockProtected.blobAdditionalLayer[layerDigest]
 	s.lock.Unlock()
 	if ok {
-		layer, err := al.PutAs(id, lastLayer, nil)
+		layer, err := al.PutAs(newLayerID, parentLayer, nil)
 		if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
-			return fmt.Errorf("failed to put layer from digest and labels: %w", err)
+			return nil, fmt.Errorf("failed to put layer from digest and labels: %w", err)
 		}
-		lastLayer = layer.ID
-		s.indexToStorageID[index] = &lastLayer
-		return nil
+		return layer, nil
 	}
 
 	// Check if we previously cached a file with that blob's contents. If we didn't,
 	// then we need to read the desired contents from a layer.
+	var trustedUncompressedDigest, trustedOriginalDigest digest.Digest // For storage.LayerOptions
 	s.lock.Lock()
-	filename, ok := s.filenames[info.digest]
+	tocDigest := s.lockProtected.indexToTOCDigest[index]       // "" if not set
+	optionalDiffID := s.lockProtected.blobDiffIDs[layerDigest] // "" if not set
+	filename, gotFilename := s.lockProtected.filenames[layerDigest]
 	s.lock.Unlock()
-	if !ok {
-		// Try to find the layer with contents matching that blobsum.
-		layer := ""
-		layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
-		if err2 == nil && len(layers) > 0 {
-			layer = layers[0].ID
+	if gotFilename && tocDigest == "" {
+		// If tocDigest != "" and we now happen to find a layerDigest match, the newLayerID has already been computed as TOC-based,
+		// and we don't know the relationship of the layerDigest and TOC digest.
+		// We could recompute newLayerID to be DiffID-based and use the file, but such a within-image layer
+		// reuse is expected to be pretty rare; instead, ignore the unexpected file match and proceed to the
+		// originally-planned TOC match.
+
+		// Because tocDigest == "", optionalDiffID must have been set; and even if it weren’t, PutLayer will recompute the digest from the stream.
+		trustedUncompressedDigest = optionalDiffID
+		trustedOriginalDigest = layerDigest // The code setting .filenames[layerDigest] is responsible for the contents matching.
+	} else {
+		// Try to find the layer with contents matching the data we use.
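+		// (Editorial note: the lookup below tries, in order: a layer with the same TOC digest when this
+		// layer is TOC-identified; otherwise a layer with the same uncompressed digest, and finally one
+		// with the same compressed digest. Whichever layer matches, its diff is re-serialized into a
+		// temporary file so that PutLayer can consume it as an uncompressed stream.)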
+		var layer *storage.Layer // = nil
+		if tocDigest != "" {
+			layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
+			if err2 == nil && len(layers) > 0 {
+				layer = &layers[0]
+			} else {
+				return nil, fmt.Errorf("locating layer for TOC digest %q: %w", tocDigest, err2)
+			}
 		} else {
-			layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest)
+			// Because tocDigest == "", optionalDiffID must have been set
+			layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(optionalDiffID)
 			if err2 == nil && len(layers) > 0 {
-				layer = layers[0].ID
+				layer = &layers[0]
+			} else {
+				layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(layerDigest)
+				if err2 == nil && len(layers) > 0 {
+					layer = &layers[0]
+				}
+			}
+			if layer == nil {
+				return nil, fmt.Errorf("locating layer for blob %q: %w", layerDigest, err2)
 			}
 		}
-		if layer == "" {
-			return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
-		}
 
 		// Read the layer's contents.
 		noCompression := archive.Uncompressed
 		diffOptions := &storage.DiffOptions{
 			Compression: &noCompression,
 		}
-		diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
+		diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
 		if err2 != nil {
-			return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
+			return nil, fmt.Errorf("reading layer %q for blob %q: %w", layer.ID, layerDigest, err2)
 		}
 		// Copy the layer diff to a file. Diff() takes a lock that it holds
 		// until the ReadCloser that it returns is closed, and PutLayer() wants
 		// the same lock, so the diff can't just be directly streamed from one
 		// to the other.
 		filename = s.computeNextBlobCacheFile()
-		file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+		file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0o600)
 		if err != nil {
 			diff.Close()
-			return fmt.Errorf("creating temporary file %q: %w", filename, err)
+			return nil, fmt.Errorf("creating temporary file %q: %w", filename, err)
 		}
 		// Copy the data to the file.
 		// TODO: This can take quite some time, and should ideally be cancellable using
 		// ctx.Done().
-		_, err = io.Copy(file, diff)
+		fileSize, err := io.Copy(file, diff)
 		diff.Close()
 		file.Close()
 		if err != nil {
-			return fmt.Errorf("storing blob to file %q: %w", filename, err)
+			return nil, fmt.Errorf("storing blob to file %q: %w", filename, err)
+		}
+
+		if optionalDiffID == "" && layer.UncompressedDigest != "" {
+			optionalDiffID = layer.UncompressedDigest
+		}
+		// The stream we have is uncompressed; this matches the contents of the stream.
+		// If tocDigest != "", trustedUncompressedDigest might still be ""; in that case PutLayer will compute the value from the stream.
+		trustedUncompressedDigest = optionalDiffID
+		// FIXME? trustedOriginalDigest could be set to layerDigest IF tocDigest == "" (otherwise layerDigest is untrusted).
+		// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
+		// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
+		//
+		// We can legitimately set storage.LayerOptions.OriginalDigest to "",
+		// but that would just result in PutLayer computing the digest of the input stream == optionalDiffID.
+		// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
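+		// (Editorial sketch, restating the choice above: with an uncompressed input stream it is
+		// consistent to pass
+		//	storage.LayerOptions{OriginalDigest: diffID, UncompressedDigest: diffID}
+		// since a provided OriginalDigest spares PutLayer from hashing the stream only to arrive at the
+		// same diffID value.)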
+		trustedOriginalDigest = optionalDiffID
+
+		// Allow using the already-collected layer contents without extracting the layer again.
+		//
+		// This only matches against the uncompressed digest.
+		// We don’t have the original compressed data here to trivially set filenames[layerDigest].
+		// In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
+		// Within-image layer reuse is probably very rare; for now we prefer to avoid that complexity.
+		if trustedUncompressedDigest != "" {
+			s.lock.Lock()
+			s.lockProtected.blobDiffIDs[trustedUncompressedDigest] = trustedUncompressedDigest
+			s.lockProtected.filenames[trustedUncompressedDigest] = filename
+			s.lockProtected.fileSizes[trustedUncompressedDigest] = fileSize
+			s.lock.Unlock()
 		}
-		// Make sure that we can find this file later, should we need the layer's
-		// contents again.
-		s.lock.Lock()
-		s.filenames[info.digest] = filename
-		s.lock.Unlock()
 	}
 
 	// Read the cached blob and use it as a diff.
 	file, err := os.Open(filename)
 	if err != nil {
-		return fmt.Errorf("opening file %q: %w", filename, err)
+		return nil, fmt.Errorf("opening file %q: %w", filename, err)
 	}
 	defer file.Close()
 	// Build the new layer using the diff, regardless of where it came from.
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-	layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
-		OriginalDigest:     info.digest,
-		UncompressedDigest: diffID,
+	layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
+		OriginalDigest:     trustedOriginalDigest,
+		UncompressedDigest: trustedUncompressedDigest,
 	}, file)
 	if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
-		return fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
+		return nil, fmt.Errorf("adding layer with blob %q: %w", layerDigest, err)
 	}
+	return layer, nil
+}
 
-	s.indexToStorageID[index] = &layer.ID
-	return nil
+// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
+// If the value is not yet available (but it can be available after s.manifest is set), it returns ("", nil).
+// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication.
+func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
+	// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
+	// nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
+	// Either way this function does not need the protection of s.lock.
+	if s.manifest == nil {
+		logrus.Debugf("Skipping commit for layer %d, manifest not yet available", layerIndex)
+		return "", nil
+	}
+
+	if s.untrustedDiffIDValues == nil {
+		mt := manifest.GuessMIMEType(s.manifest)
+		if mt != imgspecv1.MediaTypeImageManifest {
+			// We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
+			// and then use types.Image.OCIConfig so that we can parse the image.
+			//
+			// In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has an annotation),
+			// while converting to a non-OCI format, using a manual (skopeo copy) or something similar, not (podman pull).
+			// So it is not implemented yet.
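+			// (Editorial note: the supported case below parses the OCI config, where the DiffIDs live under
+			//	"rootfs": { "type": "layers", "diff_ids": ["sha256:…", "sha256:…"] }
+			// and the value returned is rootfs.diff_ids[layerIndex], deliberately left unvalidated.)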
+ return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt) + } + man, err := manifest.FromBlob(s.manifest, mt) + if err != nil { + return "", fmt.Errorf("parsing manifest: %w", err) + } + + cb, err := s.getConfigBlob(man.ConfigInfo()) + if err != nil { + return "", err + } + + // retrieve the expected uncompressed digest from the config blob. + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(cb, configOCI); err != nil { + return "", err + } + s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs) + if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory… + s.untrustedDiffIDValues = []digest.Digest{} + } + } + if layerIndex >= len(s.untrustedDiffIDValues) { + return "", fmt.Errorf("image config has only %d DiffID values, but a layer with index %d exists", len(s.untrustedDiffIDValues), layerIndex) + } + return s.untrustedDiffIDValues[layerIndex], nil } // Commit marks the process of storing the image as successful and asks for the image to be persisted. @@ -716,6 +979,8 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + // This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + if len(s.manifest) == 0 { return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") } @@ -752,20 +1017,22 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Extract, commit, or find the layers. for i, blob := range layerBlobs { - if err := s.commitLayer(i, addedLayerInfo{ + if stopQueue, err := s.commitLayer(i, addedLayerInfo{ digest: blob.Digest, emptyLayer: blob.EmptyLayer, }, blob.Size); err != nil { return err + } else if stopQueue { + return fmt.Errorf("Internal error: storageImageDestination.Commit(): commitLayer() not ready to commit for layer %q", blob.Digest) } } var lastLayer string - if len(layerBlobs) > 0 { // Can happen when using caches - prev := s.indexToStorageID[len(layerBlobs)-1] - if prev == nil { + if len(layerBlobs) > 0 { // Zero-layer images rarely make sense, but it is technically possible, and may happen for non-image artifacts. + prev, ok := s.indexToStorageID[len(layerBlobs)-1] + if !ok { return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) } - lastLayer = *prev + lastLayer = prev } // If one of those blobs was a configuration blob, then we can try to dig out the date when the image @@ -779,14 +1046,14 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so // we just need to screen out the ones that are actually layers to get the list of non-layers. 
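 	// (Editorial note: in practice the remaining "data blobs" are the image configuration and any other
 	// non-layer blobs that were cached under s.lockProtected.filenames; the set difference below keeps
 	// exactly those, and they are stored with the image as big-data items keyed by their digest.)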
dataBlobs := set.New[digest.Digest]() - for blob := range s.filenames { + for blob := range s.lockProtected.filenames { dataBlobs.Add(blob) } for _, layerBlob := range layerBlobs { dataBlobs.Delete(layerBlob.Digest) } for _, blob := range dataBlobs.Values() { - v, err := os.ReadFile(s.filenames[blob]) + v, err := os.ReadFile(s.lockProtected.filenames[blob]) if err != nil { return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) } @@ -839,7 +1106,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } // Set up to save our metadata. - metadata, err := json.Marshal(s) + metadata, err := json.Marshal(s.metadata) if err != nil { return fmt.Errorf("encoding metadata for image: %w", err) } @@ -944,7 +1211,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s } if instanceDigest == nil { s.signatures = sigblob - s.SignatureSizes = sizes + s.metadata.SignatureSizes = sizes if len(s.manifest) > 0 { manifestDigest := s.manifestDigest instanceDigest = &manifestDigest @@ -952,7 +1219,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s } if instanceDigest != nil { s.signatureses[*instanceDigest] = sigblob - s.SignaturesSizes[*instanceDigest] = sizes + s.metadata.SignaturesSizes[*instanceDigest] = sizes } return nil } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index ac09f3dbb..eed846eb9 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -18,11 +18,6 @@ var ( ErrNoSuchImage = storage.ErrNotAnImage ) -type storageImageCloser struct { - types.ImageCloser - size int64 -} - // manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions. // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably; // for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey @@ -36,6 +31,17 @@ func signatureBigDataKey(digest digest.Digest) string { return "signature-" + digest.Encoded() } +// storageImageMetadata is stored, as JSON, in storage.Image.Metadata +type storageImageMetadata struct { + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice + SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice +} + +type storageImageCloser struct { + types.ImageCloser + size int64 +} + // Size() returns the previously-computed size of the image, with no error. 
func (s *storageImageCloser) Size() (int64, error) { return s.size, nil diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index f1ce0861e..ead3300f3 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -34,16 +34,30 @@ type storageImageSource struct { impl.PropertyMethodsInitialize stubs.NoGetBlobAtInitialize - imageRef storageReference - image *storage.Image - systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files - layerPosition map[digest.Digest]int // Where we are in reading a blob's layers - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice - SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice + imageRef storageReference + image *storage.Image + systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files + metadata storageImageMetadata + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions + getBlobMutexProtected getBlobMutexProtected } +// getBlobMutexProtected contains storageImageSource data protected by getBlobMutex. +type getBlobMutexProtected struct { + // digestToLayerID is a lookup map from a possibly-untrusted uncompressed layer digest (as returned by LayerInfosForCopy) to the + // layer ID in the store. + digestToLayerID map[digest.Digest]string + + // layerPosition stores where we are in reading a blob's layers + layerPosition map[digest.Digest]int +} + +// expectedLayerDiffIDFlag is a per-layer flag containing an UNTRUSTED uncompressed digest of the layer. +// It is set when pulling a layer by TOC; later, this value is used with digestToLayerID +// to allow identifying the layer — and the consumer is expected to verify the blob returned by GetBlob against the digest. +const expectedLayerDiffIDFlag = "expected-layer-diffid" + // newImageSource sets up an image for reading. func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) { // First, locate the image. 
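The pieces introduced above cooperate on the read path: LayerInfosForCopy (in a later hunk) reports the untrusted diffID recorded under expectedLayerDiffIDFlag as a TOC-identified layer's blob digest and records which store layer that digest belongs to, so that a later GetBlob can locate the layer even though LayersByUncompressedDigest has never seen that digest. A minimal, self-contained sketch of that substitution pattern follows; the digest value and the layer ID "layer-123" are hypothetical, and this is not the real c/storage API:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Side map filled by the LayerInfosForCopy analogue.
	digestToLayerID := map[digest.Digest]string{}

	// A TOC-identified layer: its expected (untrusted) diffID was recorded in the layer flags
	// when the layer was committed.
	expectedDiffID := digest.FromString("hypothetical uncompressed layer contents")
	digestToLayerID[expectedDiffID] = "layer-123" // hypothetical store layer ID

	// The GetBlob analogue: resolve the reported digest straight back to the layer,
	// bypassing any by-uncompressed-digest index that has never seen this digest.
	if id, ok := digestToLayerID[expectedDiffID]; ok {
		fmt.Println("serving blob from layer", id)
	}
}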
@@ -59,16 +73,21 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*stora }), NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef), - imageRef: imageRef, - systemContext: sys, - image: img, - layerPosition: make(map[digest.Digest]int), - SignatureSizes: []int{}, - SignaturesSizes: make(map[digest.Digest][]int), + imageRef: imageRef, + systemContext: sys, + image: img, + metadata: storageImageMetadata{ + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + }, + getBlobMutexProtected: getBlobMutexProtected{ + digestToLayerID: make(map[digest.Digest]string), + layerPosition: make(map[digest.Digest]int), + }, } image.Compat = impl.AddCompat(image) if img.Metadata != "" { - if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + if err := json.Unmarshal([]byte(img.Metadata), &image.metadata); err != nil { return nil, fmt.Errorf("decoding metadata for source image: %w", err) } } @@ -91,6 +110,7 @@ func (s *storageImageSource) Close() error { func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { // We need a valid digest value. digest := info.Digest + err = digest.Validate() if err != nil { return nil, 0, err @@ -100,10 +120,25 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil } - // Check if the blob corresponds to a diff that was used to initialize any layers. Our - // callers should try to retrieve layers using their uncompressed digests, so no need to - // check if they're using one of the compressed digests, which we can't reproduce anyway. - layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest) + var layers []storage.Layer + + // This lookup path is strictly necessary for layers identified by TOC digest + // (where LayersByUncompressedDigest might not find our layer); + // for other layers it is an optimization to avoid the cost of the LayersByUncompressedDigest call. + s.getBlobMutex.Lock() + layerID, found := s.getBlobMutexProtected.digestToLayerID[digest] + s.getBlobMutex.Unlock() + + if found { + if layer, err := s.imageRef.transport.store.Layer(layerID); err == nil { + layers = []storage.Layer{*layer} + } + } else { + // Check if the blob corresponds to a diff that was used to initialize any layers. Our + // callers should try to retrieve layers using their uncompressed digests, so no need to + // check if they're using one of the compressed digests, which we can't reproduce anyway. + layers, _ = s.imageRef.transport.store.LayersByUncompressedDigest(digest) + } // If it's not a layer, then it must be a data item. if len(layers) == 0 { @@ -174,8 +209,8 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st // which claim to have the same contents, that we actually do have multiple layers, otherwise we could // just go ahead and use the first one every time. 
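 	// (Editorial note: i is a per-digest cursor: the Nth GetBlob call for a given digest reads
 	// layers[i%len(layers)], rotating through every store layer that claims these contents.)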
s.getBlobMutex.Lock() - i := s.layerPosition[digest] - s.layerPosition[digest] = i + 1 + i := s.getBlobMutexProtected.layerPosition[digest] + s.getBlobMutexProtected.layerPosition[digest] = i + 1 s.getBlobMutex.Unlock() if len(layers) > 0 { layer = layers[i%len(layers)] @@ -267,14 +302,35 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige if err != nil { return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err) } - if layer.UncompressedDigest == "" { - return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID) - } if layer.UncompressedSize < 0 { return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID) } + + blobDigest := layer.UncompressedDigest + if blobDigest == "" { + if layer.TOCDigest == "" { + return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID) + } + if layer.Flags == nil || layer.Flags[expectedLayerDiffIDFlag] == nil { + return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not set", layer.TOCDigest, layerID, expectedLayerDiffIDFlag) + } + expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string) + if !ok { + return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not a string", layer.TOCDigest, layerID, expectedLayerDiffIDFlag) + } + // If the layer is stored by its TOC, report the expected diffID as the layer Digest; + // the generic code is responsible for validating the digest. + // We can locate the layer without further c/storage help using s.getBlobMutexProtected.digestToLayerID. + blobDigest, err = digest.Parse(expectedDigest) + if err != nil { + return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err) + } + } + s.getBlobMutex.Lock() + s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID + s.getBlobMutex.Unlock() blobInfo := types.BlobInfo{ - Digest: layer.UncompressedDigest, + Digest: blobDigest, Size: layer.UncompressedSize, MediaType: uncompressedLayerType, } @@ -324,11 +380,11 @@ func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos [] func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { var offset int signatureBlobs := []byte{} - signatureSizes := s.SignatureSizes + signatureSizes := s.metadata.SignatureSizes key := "signatures" instance := "default instance" if instanceDigest != nil { - signatureSizes = s.SignaturesSizes[*instanceDigest] + signatureSizes = s.metadata.SignaturesSizes[*instanceDigest] key = signatureBigDataKey(*instanceDigest) instance = instanceDigest.Encoded() } @@ -374,7 +430,7 @@ func (s *storageImageSource) getSize() (int64, error) { sum += bigSize } // Add the signature sizes. - for _, sigSize := range s.SignatureSizes { + for _, sigSize := range s.metadata.SignatureSizes { sum += int64(sigSize) } // Walk the layer list. 
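 	// (Editorial note: as the next hunk shows, a TOC-identified layer may legitimately have an empty
 	// UncompressedDigest, so the check there accepts a layer when either TOCDigest or UncompressedDigest
 	// is set; UncompressedSize must still be known for getSize() to succeed.)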
@@ -384,7 +440,7 @@ func (s *storageImageSource) getSize() (int64, error) { if err != nil { return -1, err } - if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || layer.UncompressedSize < 0 { return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID) } sum += layer.UncompressedSize diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go index deb500b4d..b981953ad 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_transport.go +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -213,7 +213,7 @@ func (s *storageTransport) GetStore() (storage.Store, error) { // Return the transport's previously-set store. If we don't have one // of those, initialize one now. if s.store == nil { - options, err := storage.DefaultStoreOptionsAutoDetectUID() + options, err := storage.DefaultStoreOptions() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 180a98c5b..7d6097346 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -135,8 +135,8 @@ type BlobInfo struct { // CompressionOperation is used in Image.UpdateLayerInfos to instruct // whether the original layer's "compressed or not" should be preserved, // possibly while changing the compression algorithm from one to another, - // or if it should be compressed or decompressed. The field defaults to - // preserve the original layer's compressedness. + // or if it should be changed to compressed or decompressed. + // The field defaults to preserve the original layer's compressedness. // TODO: To remove together with CryptoOperation in re-design to remove // field out of BlobInfo. CompressionOperation LayerCompression diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index b24ee881a..a14c8e2ff 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,9 +6,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 29 + VersionMinor = 30 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. 
VersionDev = "" diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_linux.go b/vendor/github.com/containers/podman/v4/libpod/networking_linux.go deleted file mode 100644 index 601d9a49b..000000000 --- a/vendor/github.com/containers/podman/v4/libpod/networking_linux.go +++ /dev/null @@ -1,772 +0,0 @@ -//go:build !remote -// +build !remote - -package libpod - -import ( - "crypto/rand" - "crypto/sha256" - "errors" - "fmt" - "net" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - - "github.com/containernetworking/plugins/pkg/ns" - "github.com/containers/common/libnetwork/resolvconf" - "github.com/containers/common/libnetwork/slirp4netns" - "github.com/containers/common/libnetwork/types" - netUtil "github.com/containers/common/libnetwork/util" - "github.com/containers/common/pkg/netns" - "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/utils" - "github.com/containers/storage/pkg/lockfile" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/sirupsen/logrus" - "github.com/vishvananda/netlink" - "golang.org/x/sys/unix" -) - -const ( - // rootlessNetNsName is the file name for the rootless network namespace bind mount - rootlessNetNsName = "rootless-netns" - - // rootlessNetNsSilrp4netnsPidFile is the name of the rootless netns slirp4netns pid file - rootlessNetNsSilrp4netnsPidFile = "rootless-netns-slirp4netns.pid" - - // persistentCNIDir is the directory where the CNI files are stored - persistentCNIDir = "/var/lib/cni" -) - -type RootlessNetNS struct { - ns ns.NetNS - dir string - Lock *lockfile.LockFile -} - -// getPath will join the given path to the rootless netns dir -func (r *RootlessNetNS) getPath(path string) string { - return filepath.Join(r.dir, path) -} - -// Do - run the given function in the rootless netns. -// It does not lock the rootlessCNI lock, the caller -// should only lock when needed, e.g. for network operations. -func (r *RootlessNetNS) Do(toRun func() error) error { - err := r.ns.Do(func(_ ns.NetNS) error { - // Before we can run the given function, - // we have to set up all mounts correctly. - - // The order of the mounts is IMPORTANT. - // The idea of the extra mount ns is to make /run and /var/lib/cni writeable - // for the cni plugins but not affecting the podman user namespace. - // Because the plugins also need access to XDG_RUNTIME_DIR/netns some special setup is needed. - - // The following bind mounts are needed - // 1. XDG_RUNTIME_DIR -> XDG_RUNTIME_DIR/rootless-netns/XDG_RUNTIME_DIR - // 2. /run/systemd -> XDG_RUNTIME_DIR/rootless-netns/run/systemd (only if it exists) - // 3. XDG_RUNTIME_DIR/rootless-netns/resolv.conf -> /etc/resolv.conf or XDG_RUNTIME_DIR/rootless-netns/run/symlink/target - // 4. XDG_RUNTIME_DIR/rootless-netns/var/lib/cni -> /var/lib/cni (if /var/lib/cni does not exist, use the parent dir) - // 5. XDG_RUNTIME_DIR/rootless-netns/run -> /run - - // Create a new mount namespace, - // this must happen inside the netns thread. - err := unix.Unshare(unix.CLONE_NEWNS) - if err != nil { - return fmt.Errorf("cannot create a new mount namespace: %w", err) - } - - xdgRuntimeDir, err := util.GetRuntimeDir() - if err != nil { - return fmt.Errorf("could not get runtime directory: %w", err) - } - newXDGRuntimeDir := r.getPath(xdgRuntimeDir) - // 1. Mount the netns into the new run to keep them accessible. 
- // Otherwise cni setup will fail because it cannot access the netns files. - err = unix.Mount(xdgRuntimeDir, newXDGRuntimeDir, "none", unix.MS_BIND|unix.MS_SHARED|unix.MS_REC, "") - if err != nil { - return fmt.Errorf("failed to mount runtime directory for rootless netns: %w", err) - } - - // 2. Also keep /run/systemd if it exists. - // Many files are symlinked into this dir, for example /dev/log. - runSystemd := "/run/systemd" - _, err = os.Stat(runSystemd) - if err == nil { - newRunSystemd := r.getPath(runSystemd) - err = unix.Mount(runSystemd, newRunSystemd, "none", unix.MS_BIND|unix.MS_REC, "") - if err != nil { - return fmt.Errorf("failed to mount /run/systemd directory for rootless netns: %w", err) - } - } - - // 3. On some distros /etc/resolv.conf is symlinked to somewhere under /run. - // Because the kernel will follow the symlink before mounting, it is not - // possible to mount a file at /etc/resolv.conf. We have to ensure that - // the link target will be available in the mount ns. - // see: https://github.com/containers/podman/issues/10855 - resolvePath := "/etc/resolv.conf" - linkCount := 0 - for i := 1; i < len(resolvePath); i++ { - // Do not use filepath.EvalSymlinks, we only want the first symlink under /run. - // If /etc/resolv.conf has more than one symlink under /run, e.g. - // -> /run/systemd/resolve/stub-resolv.conf -> /run/systemd/resolve/resolv.conf - // we would put the netns resolv.conf file to the last path. However this will - // break dns because the second link does not exist in the mount ns. - // see https://github.com/containers/podman/issues/11222 - // - // We also need to resolve all path components not just the last file. - // see https://github.com/containers/podman/issues/12461 - - if resolvePath[i] != '/' { - // if we are at the last char we need to inc i by one because there is no final slash - if i == len(resolvePath)-1 { - i++ - } else { - // not the end of path, keep going - continue - } - } - path := resolvePath[:i] - - fi, err := os.Lstat(path) - if err != nil { - return fmt.Errorf("failed to stat resolv.conf path: %w", err) - } - - // no link, just continue - if fi.Mode()&os.ModeSymlink == 0 { - continue - } - - link, err := os.Readlink(path) - if err != nil { - return fmt.Errorf("failed to read resolv.conf symlink: %w", err) - } - linkCount++ - if filepath.IsAbs(link) { - // link is as an absolute path - resolvePath = filepath.Join(link, resolvePath[i:]) - } else { - // link is as a relative, join it with the previous path - base := filepath.Dir(path) - resolvePath = filepath.Join(base, link, resolvePath[i:]) - } - // set i back to zero since we now have a new base path - i = 0 - - // we have to stop at the first path under /run because we will have an empty /run and will create the path anyway - // if we would continue we would need to recreate all links under /run - if strings.HasPrefix(resolvePath, "/run/") { - break - } - // make sure wo do not loop forever - if linkCount == 255 { - return errors.New("too many symlinks while resolving /etc/resolv.conf") - } - } - logrus.Debugf("The path of /etc/resolv.conf in the mount ns is %q", resolvePath) - // When /etc/resolv.conf on the host is a symlink to /run/systemd/resolve/stub-resolv.conf, - // we have to mount an empty filesystem on /run/systemd/resolve in the child namespace, - // so as to isolate the directory from the host mount namespace. 
- // - // Otherwise our bind-mount for /run/systemd/resolve/stub-resolv.conf is unmounted - // when systemd-resolved unlinks and recreates /run/systemd/resolve/stub-resolv.conf on the host. - // see: https://github.com/containers/podman/issues/10929 - if strings.HasPrefix(resolvePath, "/run/systemd/resolve/") { - rsr := r.getPath("/run/systemd/resolve") - err = unix.Mount("", rsr, define.TypeTmpfs, unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, "") - if err != nil { - return fmt.Errorf("failed to mount tmpfs on %q for rootless netns: %w", rsr, err) - } - } - if strings.HasPrefix(resolvePath, "/run/") { - resolvePath = r.getPath(resolvePath) - err = os.MkdirAll(filepath.Dir(resolvePath), 0700) - if err != nil { - return fmt.Errorf("failed to create rootless-netns resolv.conf directory: %w", err) - } - // we want to bind mount on this file so we have to create the file first - _, err = os.OpenFile(resolvePath, os.O_CREATE|os.O_RDONLY, 0700) - if err != nil { - return fmt.Errorf("failed to create rootless-netns resolv.conf file: %w", err) - } - } - // mount resolv.conf to make use of the host dns - err = unix.Mount(r.getPath("resolv.conf"), resolvePath, "none", unix.MS_BIND, "") - if err != nil { - return fmt.Errorf("failed to mount resolv.conf for rootless netns: %w", err) - } - - // 4. CNI plugins need access to /var/lib/cni and /run - varDir := "" - varTarget := persistentCNIDir - // we can only mount to a target dir which exists, check /var/lib/cni recursively - // while we could always use /var there are cases where a user might store the cni - // configs under /var/custom and this would break - for { - if _, err := os.Stat(varTarget); err == nil { - varDir = r.getPath(varTarget) - break - } - varTarget = filepath.Dir(varTarget) - if varTarget == "/" { - break - } - } - if varDir == "" { - return errors.New("failed to stat /var directory") - } - // make sure to mount var first - err = unix.Mount(varDir, varTarget, "none", unix.MS_BIND, "") - if err != nil { - return fmt.Errorf("failed to mount %s for rootless netns: %w", varTarget, err) - } - - // 5. Mount the new prepared run dir to /run, it has to be recursive to keep the other bind mounts. - runDir := r.getPath("run") - err = unix.Mount(runDir, "/run", "none", unix.MS_BIND|unix.MS_REC, "") - if err != nil { - return fmt.Errorf("failed to mount /run for rootless netns: %w", err) - } - - // run the given function in the correct namespace - err = toRun() - return err - }) - return err -} - -// Clean up the rootless network namespace if needed. -// It checks if we have running containers with the bridge network mode. -// Cleanup() expects that r.Lock is locked -func (r *RootlessNetNS) Cleanup(runtime *Runtime) error { - _, err := os.Stat(r.dir) - if os.IsNotExist(err) { - // the directory does not exist, so no need for cleanup - return nil - } - activeNetns := func(c *Container) bool { - // no bridge => no need to check - if !c.config.NetMode.IsBridge() { - return false - } - - // we cannot use c.state() because it will try to lock the container - // locking is a problem because cleanup is called after net teardown - // at this stage the container is already locked. 
- // also do not try to lock only containers which are not currently in net - // teardown because this will result in an ABBA deadlock between the rootless - // rootless netns lock and the container lock - // because we need to get the state we have to sync otherwise this will not - // work because the state is empty by default - // I do not like this but I do not see a better way at moment - err := c.syncContainer() - if err != nil { - return false - } - - // only check for an active netns, we cannot use the container state - // because not running does not mean that the netns does not need cleanup - // only if the netns is empty we know that we do not need cleanup - return c.state.NetNS != "" - } - ctrs, err := runtime.GetContainers(false, activeNetns) - if err != nil { - return err - } - // no cleanup if we found no other containers with a netns - // we will always find one container (the container cleanup that is currently calling us) - if len(ctrs) > 1 { - return nil - } - logrus.Debug("Cleaning up rootless network namespace") - err = netns.UnmountNS(r.ns.Path()) - if err != nil { - return err - } - // make the following errors not fatal - err = r.ns.Close() - if err != nil { - logrus.Error(err) - } - b, err := os.ReadFile(r.getPath(rootlessNetNsSilrp4netnsPidFile)) - if err == nil { - var i int - i, err = strconv.Atoi(string(b)) - if err == nil { - // kill the slirp process so we do not leak it - err = syscall.Kill(i, syscall.SIGTERM) - } - } - if err != nil { - logrus.Errorf("Failed to kill slirp4netns process: %v", err) - } - err = os.RemoveAll(r.dir) - if err != nil { - logrus.Error(err) - } - return nil -} - -// GetRootlessNetNs returns the rootless netns object. If create is set to true -// the rootless network namespace will be created if it does not already exist. -// If called as root it returns always nil. -// On success the returned RootlessCNI lock is locked and must be unlocked by the caller. -func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) { - if !rootless.IsRootless() { - return nil, nil - } - var rootlessNetNS *RootlessNetNS - runDir := r.config.Engine.TmpDir - - lfile := filepath.Join(runDir, "rootless-netns.lock") - lock, err := lockfile.GetLockFile(lfile) - if err != nil { - return nil, fmt.Errorf("failed to get rootless-netns lockfile: %w", err) - } - lock.Lock() - defer func() { - // In case of an error (early exit) rootlessNetNS will be nil. - // Make sure to unlock otherwise we could deadlock. 
- if rootlessNetNS == nil { - lock.Unlock() - } - }() - - rootlessNetNsDir := filepath.Join(runDir, rootlessNetNsName) - err = os.MkdirAll(rootlessNetNsDir, 0700) - if err != nil { - return nil, fmt.Errorf("could not create rootless-netns directory: %w", err) - } - - nsDir, err := netns.GetNSRunDir() - if err != nil { - return nil, err - } - - // create a hash from the static dir - // the cleanup will check if there are running containers - // if you run a several libpod instances with different root/runroot directories this check will fail - // we want one netns for each libpod static dir so we use the hash to prevent name collisions - hash := sha256.Sum256([]byte(r.config.Engine.StaticDir)) - netnsName := fmt.Sprintf("%s-%x", rootlessNetNsName, hash[:10]) - - path := filepath.Join(nsDir, netnsName) - nsReference, err := ns.GetNS(path) - if err != nil { - if !new { - // return an error if we could not get the namespace and should no create one - return nil, fmt.Errorf("getting rootless network namespace: %w", err) - } - - // When the netns is not valid but the file exists we have to remove it first, - // https://github.com/containers/common/pull/1381 changed the behavior from - // NewNSWithName()so it will now error when the file already exists. - // https://github.com/containers/podman/issues/17903#issuecomment-1494329622 - if errors.As(err, &ns.NSPathNotNSErr{}) { - logrus.Infof("rootless netns is no longer valid: %v", err) - // ignore errors, if something is wrong NewNSWithName() will fail below anyway - _ = os.Remove(path) - } - - // create a new namespace - logrus.Debugf("creating rootless network namespace with name %q", netnsName) - nsReference, err = netns.NewNSWithName(netnsName) - if err != nil { - return nil, fmt.Errorf("creating rootless network namespace: %w", err) - } - res, err := slirp4netns.Setup(&slirp4netns.SetupOptions{ - Config: r.config, - ContainerID: "rootless-netns", - Netns: nsReference.Path(), - }) - if err != nil { - return nil, fmt.Errorf("failed to start rootless-netns slirp4netns: %w", err) - } - // create pid file for the slirp4netns process - // this is need to kill the process in the cleanup - pid := strconv.Itoa(res.Pid) - err = os.WriteFile(filepath.Join(rootlessNetNsDir, rootlessNetNsSilrp4netnsPidFile), []byte(pid), 0700) - if err != nil { - return nil, fmt.Errorf("unable to write rootless-netns slirp4netns pid file: %w", err) - } - - if utils.RunsOnSystemd() { - // move to systemd scope to prevent systemd from killing it - err = utils.MoveRootlessNetnsSlirpProcessToUserSlice(res.Pid) - if err != nil { - // only log this, it is not fatal but can lead to issues when running podman inside systemd units - logrus.Errorf("failed to move the rootless netns slirp4netns process to the systemd user.slice: %v", err) - } - } - - // build a new resolv.conf file which uses the slirp4netns dns server address - resolveIP, err := slirp4netns.GetDNS(res.Subnet) - if err != nil { - return nil, fmt.Errorf("failed to determine default slirp4netns DNS address: %w", err) - } - - if err := resolvconf.New(&resolvconf.Params{ - Path: filepath.Join(rootlessNetNsDir, "resolv.conf"), - // fake the netns since we want to filter localhost - Namespaces: []specs.LinuxNamespace{ - {Type: specs.NetworkNamespace}, - }, - IPv6Enabled: res.IPv6, - KeepHostServers: true, - Nameservers: []string{resolveIP.String()}, - }); err != nil { - return nil, fmt.Errorf("failed to create rootless netns resolv.conf: %w", err) - } - // create cni directories to store files - // they will be bind 
mounted to the correct location in an extra mount ns - err = os.MkdirAll(filepath.Join(rootlessNetNsDir, persistentCNIDir), 0700) - if err != nil { - return nil, fmt.Errorf("could not create rootless-netns var directory: %w", err) - } - runDir := filepath.Join(rootlessNetNsDir, "run") - err = os.MkdirAll(runDir, 0700) - if err != nil { - return nil, fmt.Errorf("could not create rootless-netns run directory: %w", err) - } - // relabel the new run directory to the iptables /run label - // this is important, otherwise the iptables command will fail - err = label.Relabel(runDir, "system_u:object_r:iptables_var_run_t:s0", false) - if err != nil { - return nil, fmt.Errorf("could not create relabel rootless-netns run directory: %w", err) - } - // create systemd run directory - err = os.MkdirAll(filepath.Join(runDir, "systemd"), 0700) - if err != nil { - return nil, fmt.Errorf("could not create rootless-netns systemd directory: %w", err) - } - // create the directory for the netns files at the same location - // relative to the rootless-netns location - err = os.MkdirAll(filepath.Join(rootlessNetNsDir, nsDir), 0700) - if err != nil { - return nil, fmt.Errorf("could not create rootless-netns netns directory: %w", err) - } - } - - // The CNI plugins and netavark need access to iptables in $PATH. As it turns out debian doesn't put - // /usr/sbin in $PATH for rootless users. This will break rootless networking completely. - // We might break existing users and we cannot expect everyone to change their $PATH so - // let's add /usr/sbin to $PATH ourselves. - path = os.Getenv("PATH") - if !strings.Contains(path, "/usr/sbin") { - path += ":/usr/sbin" - os.Setenv("PATH", path) - } - - // Important set rootlessNetNS as last step. - // Do not return any errors after this. - rootlessNetNS = &RootlessNetNS{ - ns: nsReference, - dir: rootlessNetNsDir, - Lock: lock, - } - return rootlessNetNS, nil -} - -// Create and configure a new network namespace for a container -func (r *Runtime) configureNetNS(ctr *Container, ctrNS string) (status map[string]types.StatusBlock, rerr error) { - if err := r.exposeMachinePorts(ctr.config.PortMappings); err != nil { - return nil, err - } - defer func() { - // make sure to unexpose the gvproxy ports when an error happens - if rerr != nil { - if err := r.unexposeMachinePorts(ctr.config.PortMappings); err != nil { - logrus.Errorf("failed to free gvproxy machine ports: %v", err) - } - } - }() - if ctr.config.NetMode.IsSlirp4netns() { - return nil, r.setupSlirp4netns(ctr, ctrNS) - } - if ctr.config.NetMode.IsPasta() { - return nil, r.setupPasta(ctr, ctrNS) - } - networks, err := ctr.networks() - if err != nil { - return nil, err - } - // All networks have been removed from the container. - // This is effectively forcing net=none. - if len(networks) == 0 { - return nil, nil - } - - netOpts := ctr.getNetworkOptions(networks) - netStatus, err := r.setUpNetwork(ctrNS, netOpts) - if err != nil { - return nil, err - } - defer func() { - // do not forget to tear down the netns when a later error happened. 
- if rerr != nil { - if err := r.teardownNetworkBackend(ctrNS, netOpts); err != nil { - logrus.Warnf("failed to teardown network after failed setup: %v", err) - } - } - }() - - // set up rootless port forwarder when rootless with ports and the network status is empty, - // if this is called from network reload the network status will not be empty and we should - // not set up port because they are still active - if rootless.IsRootless() && len(ctr.config.PortMappings) > 0 && ctr.getNetworkStatus() == nil { - // set up port forwarder for rootless netns - // TODO: support slirp4netns port forwarder as well - // make sure to fix this in container.handleRestartPolicy() as well - // Important we have to call this after r.setUpNetwork() so that - // we can use the proper netStatus - err = r.setupRootlessPortMappingViaRLK(ctr, ctrNS, netStatus) - } - return netStatus, err -} - -// Create and configure a new network namespace for a container -func (r *Runtime) createNetNS(ctr *Container) (n string, q map[string]types.StatusBlock, retErr error) { - ctrNS, err := netns.NewNS() - if err != nil { - return "", nil, fmt.Errorf("creating network namespace for container %s: %w", ctr.ID(), err) - } - defer func() { - if retErr != nil { - if err := netns.UnmountNS(ctrNS.Path()); err != nil { - logrus.Errorf("Unmounting partially created network namespace for container %s: %v", ctr.ID(), err) - } - if err := ctrNS.Close(); err != nil { - logrus.Errorf("Closing partially created network namespace for container %s: %v", ctr.ID(), err) - } - } - }() - - logrus.Debugf("Made network namespace at %s for container %s", ctrNS.Path(), ctr.ID()) - - var networkStatus map[string]types.StatusBlock - networkStatus, err = r.configureNetNS(ctr, ctrNS.Path()) - return ctrNS.Path(), networkStatus, err -} - -// Configure the network namespace using the container process -func (r *Runtime) setupNetNS(ctr *Container) error { - nsProcess := fmt.Sprintf("/proc/%d/ns/net", ctr.state.PID) - - b := make([]byte, 16) - - if _, err := rand.Reader.Read(b); err != nil { - return fmt.Errorf("failed to generate random netns name: %w", err) - } - nsPath, err := netns.GetNSRunDir() - if err != nil { - return err - } - nsPath = filepath.Join(nsPath, fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])) - - if err := os.MkdirAll(filepath.Dir(nsPath), 0711); err != nil { - return err - } - - mountPointFd, err := os.Create(nsPath) - if err != nil { - return err - } - if err := mountPointFd.Close(); err != nil { - return err - } - - if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil { - return fmt.Errorf("cannot mount %s: %w", nsPath, err) - } - - networkStatus, err := r.configureNetNS(ctr, nsPath) - - // Assign NetNS attributes to container - ctr.state.NetNS = nsPath - ctr.state.NetworkStatus = networkStatus - return err -} - -// Tear down a network namespace, undoing all state associated with it. -func (r *Runtime) teardownNetNS(ctr *Container) error { - if err := r.unexposeMachinePorts(ctr.config.PortMappings); err != nil { - // do not return an error otherwise we would prevent network cleanup - logrus.Errorf("failed to free gvproxy machine ports: %v", err) - } - - // Do not check the error here, we want to always umount the netns - // This will ensure that the container interface will be deleted - // even when there is a CNI or netavark bug. 
- prevErr := r.teardownNetwork(ctr) - - // First unmount the namespace - if err := netns.UnmountNS(ctr.state.NetNS); err != nil { - if prevErr != nil { - logrus.Error(prevErr) - } - return fmt.Errorf("unmounting network namespace for container %s: %w", ctr.ID(), err) - } - - ctr.state.NetNS = "" - - return prevErr -} - -func getContainerNetNS(ctr *Container) (string, *Container, error) { - if ctr.state.NetNS != "" { - return ctr.state.NetNS, nil, nil - } - if ctr.config.NetNsCtr != "" { - c, err := ctr.runtime.GetContainer(ctr.config.NetNsCtr) - if err != nil { - return "", nil, err - } - if err = c.syncContainer(); err != nil { - return "", c, err - } - netNs, c2, err := getContainerNetNS(c) - if c2 != nil { - c = c2 - } - return netNs, c, err - } - return "", nil, nil -} - -// TODO (5.0): return the statistics per network interface -// This would allow better compat with docker. -func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) { - var netStats *netlink.LinkStatistics - - netNSPath, otherCtr, netPathErr := getContainerNetNS(ctr) - if netPathErr != nil { - return nil, netPathErr - } - if netNSPath == "" { - // If netNSPath is empty, it was set as none, and no netNS was set up - // this is a valid state and thus return no error, nor any statistics - return nil, nil - } - - netMode := ctr.config.NetMode - netStatus := ctr.getNetworkStatus() - if otherCtr != nil { - netMode = otherCtr.config.NetMode - netStatus = otherCtr.getNetworkStatus() - } - if netMode.IsSlirp4netns() { - // create a fake status with correct interface name for the logic below - netStatus = map[string]types.StatusBlock{ - "slirp4netns": { - Interfaces: map[string]types.NetInterface{"tap0": {}}, - }, - } - } - err := ns.WithNetNSPath(netNSPath, func(_ ns.NetNS) error { - for _, status := range netStatus { - for dev := range status.Interfaces { - link, err := netlink.LinkByName(dev) - if err != nil { - return err - } - if netStats == nil { - netStats = link.Attrs().Statistics - continue - } - // Currently only Tx/RxBytes are used. - // In the future we should return all stats per interface so that - // api users have a better options. 
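
The statistics loop in getContainerNetIO above sums TxBytes/RxBytes over every interface reported in the network status. A reduced sketch, assuming the github.com/vishvananda/netlink package the vendored code uses and that the caller already runs inside the target network namespace (the real code enters it with ns.WithNetNSPath first):

package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

// sumNetIO aggregates transmit/receive byte counters across the named links.
func sumNetIO(devices []string) (uint64, uint64, error) {
	var tx, rx uint64
	for _, dev := range devices {
		link, err := netlink.LinkByName(dev)
		if err != nil {
			return 0, 0, err
		}
		if stats := link.Attrs().Statistics; stats != nil {
			tx += stats.TxBytes
			rx += stats.RxBytes
		}
	}
	return tx, rx, nil
}

func main() {
	tx, rx, err := sumNetIO([]string{"eth0"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("tx=%d rx=%d\n", tx, rx)
}
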
- stats := link.Attrs().Statistics - netStats.TxBytes += stats.TxBytes - netStats.RxBytes += stats.RxBytes - } - } - return nil - }) - return netStats, err -} - -// joinedNetworkNSPath returns netns path and bool if netns was set -func (c *Container) joinedNetworkNSPath() (string, bool) { - for _, namespace := range c.config.Spec.Linux.Namespaces { - if namespace.Type == specs.NetworkNamespace { - return namespace.Path, true - } - } - return "", false -} - -func (c *Container) inspectJoinedNetworkNS(networkns string) (q types.StatusBlock, retErr error) { - var result types.StatusBlock - err := ns.WithNetNSPath(networkns, func(_ ns.NetNS) error { - ifaces, err := net.Interfaces() - if err != nil { - return err - } - routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL) - if err != nil { - return err - } - var gateway net.IP - for _, route := range routes { - // default gateway - if route.Dst == nil { - gateway = route.Gw - } - } - result.Interfaces = make(map[string]types.NetInterface) - for _, iface := range ifaces { - if iface.Flags&net.FlagLoopback != 0 { - continue - } - addrs, err := iface.Addrs() - if err != nil { - continue - } - if len(addrs) == 0 { - continue - } - subnets := make([]types.NetAddress, 0, len(addrs)) - for _, address := range addrs { - if ipnet, ok := address.(*net.IPNet); ok { - if ipnet.IP.IsLinkLocalMulticast() || ipnet.IP.IsLinkLocalUnicast() { - continue - } - subnet := types.NetAddress{ - IPNet: types.IPNet{ - IPNet: *ipnet, - }, - } - if ipnet.Contains(gateway) { - subnet.Gateway = gateway - } - subnets = append(subnets, subnet) - } - } - result.Interfaces[iface.Name] = types.NetInterface{ - Subnets: subnets, - MacAddress: types.HardwareAddr(iface.HardwareAddr), - } - } - return nil - }) - return result, err -} - -func getPastaIP(state *ContainerState) (net.IP, error) { - var ip string - err := ns.WithNetNSPath(state.NetNS, func(_ ns.NetNS) error { - // get the first ip in the netns - ip = netUtil.GetLocalIP() - return nil - }) - return net.ParseIP(ip), err -} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types.go deleted file mode 100644 index c9e14b123..000000000 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types.go +++ /dev/null @@ -1,75 +0,0 @@ -package manifests - -// InspectOptions are optional options for inspecting manifests -// -//go:generate go run ../generator/generator.go InspectOptions -type InspectOptions struct { - // Authfile - path to an authentication file. - Authfile *string - // SkipTLSVerify - skip https and certificate validation when - // contacting container registries. 
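
inspectJoinedNetworkNS above walks all interfaces, skipping loopback devices and link-local addresses. A standard-library-only sketch of that filter (gateway detection via the netlink route list is elided):

package main

import (
	"fmt"
	"net"
)

// usableSubnets returns the non-loopback, non-link-local subnets visible in
// the current network namespace, as the inspect code above does.
func usableSubnets() ([]*net.IPNet, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil, err
	}
	var subnets []*net.IPNet
	for _, iface := range ifaces {
		if iface.Flags&net.FlagLoopback != 0 {
			continue
		}
		addrs, err := iface.Addrs()
		if err != nil {
			continue // mirror the original: skip interfaces we cannot query
		}
		for _, addr := range addrs {
			ipnet, ok := addr.(*net.IPNet)
			if !ok || ipnet.IP.IsLinkLocalUnicast() || ipnet.IP.IsLinkLocalMulticast() {
				continue
			}
			subnets = append(subnets, ipnet)
		}
	}
	return subnets, nil
}

func main() {
	subnets, err := usableSubnets()
	if err != nil {
		panic(err)
	}
	for _, s := range subnets {
		fmt.Println(s)
	}
}
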
- SkipTLSVerify *bool -} - -// CreateOptions are optional options for creating manifests -// -//go:generate go run ../generator/generator.go CreateOptions -type CreateOptions struct { - All *bool - Amend *bool -} - -// ExistsOptions are optional options for checking -// if a manifest list exists -// -//go:generate go run ../generator/generator.go ExistsOptions -type ExistsOptions struct { -} - -// AddOptions are optional options for adding manifest lists -// -//go:generate go run ../generator/generator.go AddOptions -type AddOptions struct { - All *bool - Annotation map[string]string - Arch *string - Features []string - Images []string - OS *string - OSVersion *string - Variant *string - Authfile *string - Password *string - Username *string - SkipTLSVerify *bool `schema:"-"` -} - -// RemoveOptions are optional options for removing manifest lists -// -//go:generate go run ../generator/generator.go RemoveOptions -type RemoveOptions struct { -} - -// ModifyOptions are optional options for modifying manifest lists -// -//go:generate go run ../generator/generator.go ModifyOptions -type ModifyOptions struct { - // Operation values are "update", "remove" and "annotate". This allows the service to - // efficiently perform each update on a manifest list. - Operation *string - All *bool // All when true, operate on all images in a manifest list that may be included in Images - Annotations map[string]string // Annotations to add to manifest list - Arch *string // Arch overrides the architecture for the image - Features []string // Feature list for the image - Images []string // Images is an optional list of images to add/remove to/from manifest list depending on operation - OS *string // OS overrides the operating system for the image - // OS features for the image - OSFeatures []string `json:"os_features" schema:"os_features"` - // OSVersion overrides the operating system for the image - OSVersion *string `json:"os_version" schema:"os_version"` - Variant *string // Variant overrides the operating system variant for the image - Authfile *string - Password *string - Username *string - SkipTLSVerify *bool `schema:"-"` -} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go deleted file mode 100644 index 819266980..000000000 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/manifest.go +++ /dev/null @@ -1,122 +0,0 @@ -package entities - -import "github.com/containers/image/v5/types" - -// ManifestCreateOptions provides model for creating manifest -type ManifestCreateOptions struct { - // True when adding lists to include all images - All bool `schema:"all"` - // Amend an extant list if there's already one with the desired name - Amend bool `schema:"amend"` - // Should TLS registry certificate be verified? - SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` -} - -// ManifestInspectOptions provides model for inspecting manifest -type ManifestInspectOptions struct { - // Path to an authentication file. - Authfile string `json:"-" schema:"-"` - // Should TLS registry certificate be verified? 
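
These bindings option structs use pointer fields so that nil can mean "unset, use the server default". A hedged sketch of the usual caller pattern, with a tiny generic pointer helper; ModifyOptionsLike is a stand-in struct for illustration, not the real bindings type:

package main

import "fmt"

// ptr converts a literal to a pointer so optional fields can be set inline.
func ptr[T any](v T) *T { return &v }

type ModifyOptionsLike struct {
	Operation *string
	All       *bool
	Arch      *string
}

func main() {
	opts := ModifyOptionsLike{
		Operation: ptr("update"),
		All:       ptr(true),
		// Arch left nil: the field is simply omitted from the request.
	}
	fmt.Println(*opts.Operation, *opts.All, opts.Arch)
}
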
- SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` -} - -// ManifestAddOptions provides model for adding digests to manifest list -// -// swagger:model -type ManifestAddOptions struct { - ManifestAnnotateOptions - // True when operating on a list to include all images - All bool `json:"all" schema:"all"` - // authfile to use when pushing manifest list - Authfile string `json:"-" schema:"-"` - // Home directory for certificates when pushing a manifest list - CertDir string `json:"-" schema:"-"` - // Password to authenticate to registry when pushing manifest list - Password string `json:"-" schema:"-"` - // Should TLS registry certificate be verified? - SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` - // Username to authenticate to registry when pushing manifest list - Username string `json:"-" schema:"-"` - // Images is an optional list of images to add to manifest list - Images []string `json:"images" schema:"images"` -} - -// ManifestAnnotateOptions provides model for annotating manifest list -type ManifestAnnotateOptions struct { - // Annotation to add to manifest list - Annotation []string `json:"annotation" schema:"annotation"` - // Annotations to add to manifest list by a map which is prefferred over Annotation - Annotations map[string]string `json:"annotations" schema:"annotations"` - // Arch overrides the architecture for the image - Arch string `json:"arch" schema:"arch"` - // Feature list for the image - Features []string `json:"features" schema:"features"` - // OS overrides the operating system for the image - OS string `json:"os" schema:"os"` - // OS features for the image - OSFeatures []string `json:"os_features" schema:"os_features"` - // OSVersion overrides the operating system for the image - OSVersion string `json:"os_version" schema:"os_version"` - // Variant for the image - Variant string `json:"variant" schema:"variant"` -} - -// ManifestModifyOptions provides the model for mutating a manifest -// -// swagger 2.0 does not support oneOf for schema validation. -// -// Operation "update" uses all fields. -// Operation "remove" uses fields: Operation and Images -// Operation "annotate" uses fields: Operation and Annotations -// -// swagger:model -type ManifestModifyOptions struct { - Operation string `json:"operation" schema:"operation"` // Valid values: update, remove, annotate - ManifestAddOptions - ManifestRemoveOptions -} - -// ManifestPushReport provides the model for the pushed manifest -// -// swagger:model -type ManifestPushReport struct { - // ID of the pushed manifest - ID string `json:"Id"` - // Stream used to provide push progress - Stream string `json:"stream,omitempty"` - // Error contains text of errors from pushing - Error string `json:"error,omitempty"` -} - -// ManifestRemoveOptions provides the model for removing digests from a manifest -// -// swagger:model -type ManifestRemoveOptions struct { -} - -// ManifestRemoveReport provides the model for the removed manifest -// -// swagger:model -type ManifestRemoveReport struct { - // Deleted manifest list. - Deleted []string `json:",omitempty"` - // Untagged images. Can be longer than Deleted. - Untagged []string `json:",omitempty"` - // Errors associated with operation - Errors []string `json:",omitempty"` - // ExitCode describes the exit codes as described in the `podman rmi` - // man page. 
- ExitCode int -} - -// ManifestModifyReport provides the model for removed digests and changed manifest -// -// swagger:model -type ManifestModifyReport struct { - // Manifest List ID - ID string `json:"Id"` - // Images to removed from manifest list, otherwise not provided. - Images []string `json:"images,omitempty" schema:"images"` - // Errors associated with operation - Errors []error `json:"errors,omitempty"` -} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/types.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/types.go deleted file mode 100644 index 7dc785078..000000000 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/types.go +++ /dev/null @@ -1,29 +0,0 @@ -// copied from github.com/docker/docker/api/types -package types - -// ComponentVersion describes the version information for a specific component. -type ComponentVersion struct { - Name string - Version string - Details map[string]string `json:",omitempty"` -} - -// Version contains response of Engine API: -// GET "/version" -type Version struct { - Platform struct{ Name string } `json:",omitempty"` - Components []ComponentVersion `json:",omitempty"` - - // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility - - Version string - APIVersion string `json:"ApiVersion"` - MinAPIVersion string `json:"MinAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/system_freebsd.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/system_freebsd.go deleted file mode 100644 index 33ccebbb3..000000000 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/system_freebsd.go +++ /dev/null @@ -1,4 +0,0 @@ -package abi - -// Default path for system runtime state -const defaultRunPath = "/var/run" diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/system_linux.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/system_linux.go deleted file mode 100644 index 6a13f0705..000000000 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/system_linux.go +++ /dev/null @@ -1,4 +0,0 @@ -package abi - -// Default path for system runtime state -const defaultRunPath = "/run" diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go deleted file mode 100644 index 406d56ce6..000000000 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_supported.go +++ /dev/null @@ -1,120 +0,0 @@ -//go:build !windows -// +build !windows - -package util - -// TODO once rootless function is consolidated under libpod, we -// should work to take darwin from this - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strconv" - "syscall" - - "github.com/containers/podman/v4/pkg/rootless" - "github.com/sirupsen/logrus" -) - -// GetRuntimeDir returns the runtime directory -func GetRuntimeDir() (string, error) { - var rootlessRuntimeDirError error - - if !rootless.IsRootless() { - return "", nil - } - - rootlessRuntimeDirOnce.Do(func() { - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - - if runtimeDir != "" { - rootlessRuntimeDir, rootlessRuntimeDirError = filepath.EvalSymlinks(runtimeDir) - return - } - - uid := strconv.Itoa(rootless.GetRootlessUID()) - if 
runtimeDir == "" { - tmpDir := filepath.Join("/run", "user", uid) - if err := os.MkdirAll(tmpDir, 0700); err != nil { - logrus.Debug(err) - } - st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && (st.Mode().Perm()&0700 == 0700) { - runtimeDir = tmpDir - } - } - if runtimeDir == "" { - tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("podman-run-%s", uid)) - if err := os.MkdirAll(tmpDir, 0700); err != nil { - logrus.Debug(err) - } - st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && (st.Mode().Perm()&0700 == 0700) { - runtimeDir = tmpDir - } - } - if runtimeDir == "" { - home := os.Getenv("HOME") - if home == "" { - rootlessRuntimeDirError = errors.New("neither XDG_RUNTIME_DIR nor HOME was set non-empty") - return - } - resolvedHome, err := filepath.EvalSymlinks(home) - if err != nil { - rootlessRuntimeDirError = fmt.Errorf("cannot resolve %s: %w", home, err) - return - } - runtimeDir = filepath.Join(resolvedHome, "rundir") - } - rootlessRuntimeDir = runtimeDir - }) - - if rootlessRuntimeDirError != nil { - return "", rootlessRuntimeDirError - } - return rootlessRuntimeDir, nil -} - -// GetRootlessConfigHomeDir returns the config home directory when running as non root -func GetRootlessConfigHomeDir() (string, error) { - var rootlessConfigHomeDirError error - - rootlessConfigHomeDirOnce.Do(func() { - cfgHomeDir := os.Getenv("XDG_CONFIG_HOME") - if cfgHomeDir == "" { - home := os.Getenv("HOME") - resolvedHome, err := filepath.EvalSymlinks(home) - if err != nil { - rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err) - return - } - tmpDir := filepath.Join(resolvedHome, ".config") - st, err := os.Stat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() >= 0700 { - cfgHomeDir = tmpDir - } - } - rootlessConfigHomeDir = cfgHomeDir - }) - - if rootlessConfigHomeDirError != nil { - return "", rootlessConfigHomeDirError - } - - return rootlessConfigHomeDir, nil -} - -// GetRootlessPauseProcessPidPath returns the path to the file that holds the pid for -// the pause process. -func GetRootlessPauseProcessPidPath() (string, error) { - runtimeDir, err := GetRuntimeDir() - if err != nil { - return "", err - } - // Note this path must be kept in sync with pkg/rootless/rootless_linux.go - // We only want a single pause process per user, so we do not want to use - // the tmpdir which can be changed via --tmpdir. 
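
GetRuntimeDir above tries a fixed sequence of candidate directories. A condensed sketch of that fallback order; the real code's ownership and permission checks (Stat, euid comparison, 0700 mode) are elided:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// runtimeDirCandidates lists the fallback order used above:
// $XDG_RUNTIME_DIR, /run/user/$UID, $TMPDIR/podman-run-$UID, $HOME/rundir.
func runtimeDirCandidates(uid string) []string {
	var out []string
	if d := os.Getenv("XDG_RUNTIME_DIR"); d != "" {
		out = append(out, d)
	}
	out = append(out,
		filepath.Join("/run", "user", uid),
		filepath.Join(os.TempDir(), "podman-run-"+uid),
	)
	if home := os.Getenv("HOME"); home != "" {
		out = append(out, filepath.Join(home, "rundir"))
	}
	return out
}

func main() {
	fmt.Println(runtimeDirCandidates("1000"))
}
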
- return filepath.Join(runtimeDir, "libpod", "tmp", "pause.pid"), nil -} diff --git a/vendor/github.com/containers/podman/v4/utils/utils_supported.go b/vendor/github.com/containers/podman/v4/utils/utils_supported.go deleted file mode 100644 index 3bbd5dbbd..000000000 --- a/vendor/github.com/containers/podman/v4/utils/utils_supported.go +++ /dev/null @@ -1,205 +0,0 @@ -//go:build linux || darwin || freebsd -// +build linux darwin freebsd - -package utils - -import ( - "bufio" - "bytes" - "context" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/containers/common/pkg/cgroups" - "github.com/containers/podman/v4/pkg/rootless" - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/godbus/dbus/v5" - "github.com/sirupsen/logrus" -) - -// RunUnderSystemdScope adds the specified pid to a systemd scope -func RunUnderSystemdScope(pid int, slice string, unitName string) error { - var properties []systemdDbus.Property - var conn *systemdDbus.Conn - var err error - - if rootless.IsRootless() { - conn, err = cgroups.UserConnection(rootless.GetRootlessUID()) - if err != nil { - return err - } - } else { - conn, err = systemdDbus.NewWithContext(context.Background()) - if err != nil { - return err - } - } - defer conn.Close() - properties = append(properties, systemdDbus.PropSlice(slice)) - properties = append(properties, newProp("PIDs", []uint32{uint32(pid)})) - properties = append(properties, newProp("Delegate", true)) - properties = append(properties, newProp("DefaultDependencies", false)) - ch := make(chan string) - _, err = conn.StartTransientUnitContext(context.Background(), unitName, "replace", properties, ch) - if err != nil { - // On errors check if the cgroup already exists, if it does move the process there - if props, err := conn.GetUnitTypePropertiesContext(context.Background(), unitName, "Scope"); err == nil { - if cgroup, ok := props["ControlGroup"].(string); ok && cgroup != "" { - if err := MoveUnderCgroup(cgroup, "", []uint32{uint32(pid)}); err == nil { - return nil - } - // On errors return the original error message we got from StartTransientUnit. - } - } - return err - } - - // Block until job is started - <-ch - - return nil -} - -func getCgroupProcess(procFile string, allowRoot bool) (string, error) { - f, err := os.Open(procFile) - if err != nil { - return "", err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - cgroup := "" - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, ":", 3) - if len(parts) != 3 { - return "", fmt.Errorf("cannot parse cgroup line %q", line) - } - if strings.HasPrefix(line, "0::") { - cgroup = line[3:] - break - } - if len(parts[2]) > len(cgroup) { - cgroup = parts[2] - } - } - if len(cgroup) == 0 || (!allowRoot && cgroup == "/") { - return "", fmt.Errorf("could not find cgroup mount in %q", procFile) - } - return cgroup, nil -} - -// GetOwnCgroup returns the cgroup for the current process. -func GetOwnCgroup() (string, error) { - return getCgroupProcess("/proc/self/cgroup", true) -} - -func GetOwnCgroupDisallowRoot() (string, error) { - return getCgroupProcess("/proc/self/cgroup", false) -} - -// GetCgroupProcess returns the cgroup for the specified process process. -func GetCgroupProcess(pid int) (string, error) { - return getCgroupProcess(fmt.Sprintf("/proc/%d/cgroup", pid), true) -} - -// MoveUnderCgroupSubtree moves the PID under a cgroup subtree. 
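
getCgroupProcess above prefers the cgroup v2 "0::" entry and otherwise keeps the longest v1 path. A self-contained sketch of the same parsing against /proc/self/cgroup:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// ownCgroup parses /proc/self/cgroup: a "0::" line wins outright (cgroup v2),
// otherwise the longest v1 controller path is returned.
func ownCgroup() (string, error) {
	f, err := os.Open("/proc/self/cgroup")
	if err != nil {
		return "", err
	}
	defer f.Close()

	cgroup := ""
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		parts := strings.SplitN(line, ":", 3)
		if len(parts) != 3 {
			return "", fmt.Errorf("cannot parse cgroup line %q", line)
		}
		if strings.HasPrefix(line, "0::") {
			return line[3:], nil
		}
		if len(parts[2]) > len(cgroup) {
			cgroup = parts[2]
		}
	}
	if err := scanner.Err(); err != nil {
		return "", err
	}
	if cgroup == "" {
		return "", fmt.Errorf("no cgroup entry found")
	}
	return cgroup, nil
}

func main() {
	cg, err := ownCgroup()
	if err != nil {
		panic(err)
	}
	fmt.Println(cg)
}
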
-func MoveUnderCgroupSubtree(subtree string) error { - return MoveUnderCgroup("", subtree, nil) -} - -// MoveUnderCgroup moves a group of processes to a new cgroup. -// If cgroup is the empty string, then the current calling process cgroup is used. -// If processes is empty, then the processes from the current cgroup are moved. -func MoveUnderCgroup(cgroup, subtree string, processes []uint32) error { - procFile := "/proc/self/cgroup" - f, err := os.Open(procFile) - if err != nil { - return err - } - defer f.Close() - - unifiedMode, err := cgroups.IsCgroup2UnifiedMode() - if err != nil { - return err - } - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, ":", 3) - if len(parts) != 3 { - return fmt.Errorf("cannot parse cgroup line %q", line) - } - - // root cgroup, skip it - if parts[2] == "/" && !(unifiedMode && parts[1] == "") { - continue - } - - cgroupRoot := "/sys/fs/cgroup" - // Special case the unified mount on hybrid cgroup and named hierarchies. - // This works on Fedora 31, but we should really parse the mounts to see - // where the cgroup hierarchy is mounted. - if parts[1] == "" && !unifiedMode { - // If it is not using unified mode, the cgroup v2 hierarchy is - // usually mounted under /sys/fs/cgroup/unified - cgroupRoot = filepath.Join(cgroupRoot, "unified") - - // Ignore the unified mount if it doesn't exist - if _, err := os.Stat(cgroupRoot); err != nil && os.IsNotExist(err) { - continue - } - } else if parts[1] != "" { - // Assume the controller is mounted at /sys/fs/cgroup/$CONTROLLER. - controller := strings.TrimPrefix(parts[1], "name=") - cgroupRoot = filepath.Join(cgroupRoot, controller) - } - - parentCgroup := cgroup - if parentCgroup == "" { - parentCgroup = parts[2] - } - newCgroup := filepath.Join(cgroupRoot, parentCgroup, subtree) - if err := os.MkdirAll(newCgroup, 0755); err != nil && !os.IsExist(err) { - return err - } - - f, err := os.OpenFile(filepath.Join(newCgroup, "cgroup.procs"), os.O_RDWR, 0755) - if err != nil { - return err - } - defer f.Close() - - if len(processes) > 0 { - for _, pid := range processes { - if _, err := f.WriteString(fmt.Sprintf("%d\n", pid)); err != nil { - logrus.Debugf("Cannot move process %d to cgroup %q: %v", pid, newCgroup, err) - } - } - } else { - processesData, err := os.ReadFile(filepath.Join(cgroupRoot, parts[2], "cgroup.procs")) - if err != nil { - return err - } - for _, pid := range bytes.Split(processesData, []byte("\n")) { - if len(pid) == 0 { - continue - } - if _, err := f.Write(pid); err != nil { - logrus.Debugf("Cannot move process %s to cgroup %q: %v", string(pid), newCgroup, err) - } - } - } - } - return nil -} - -func newProp(name string, units interface{}) systemdDbus.Property { - return systemdDbus.Property{ - Name: name, - Value: dbus.MakeVariant(units), - } -} diff --git a/vendor/github.com/containers/podman/v4/utils/utils_windows.go b/vendor/github.com/containers/podman/v4/utils/utils_windows.go deleted file mode 100644 index 18f232116..000000000 --- a/vendor/github.com/containers/podman/v4/utils/utils_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build windows -// +build windows - -package utils - -import "errors" - -func RunUnderSystemdScope(pid int, slice string, unitName string) error { - return errors.New("not implemented for windows") -} - -func MoveUnderCgroupSubtree(subtree string) error { - return errors.New("not implemented for windows") -} - -func GetOwnCgroup() (string, error) { - return "", errors.New("not implemented for windows") 
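
MoveUnderCgroup ultimately writes PIDs, one per line, into the target cgroup's cgroup.procs file. A minimal sketch assuming a cgroup v2 hierarchy mounted at /sys/fs/cgroup; the v1/hybrid controller-root resolution in the real code is omitted:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// moveUnderCgroup creates the target cgroup directory if needed and appends
// each PID to its cgroup.procs file, which is how the kernel moves processes.
func moveUnderCgroup(cgroupPath string, pids []uint32) error {
	dir := filepath.Join("/sys/fs/cgroup", cgroupPath)
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return err
	}
	f, err := os.OpenFile(filepath.Join(dir, "cgroup.procs"), os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	for _, pid := range pids {
		if _, err := fmt.Fprintf(f, "%d\n", pid); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := moveUnderCgroup("mygroup", []uint32{uint32(os.Getpid())}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
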
-} - -func GetOwnCgroupDisallowRoot() (string, error) { - return "", errors.New("not implemented for windows") -} - -func GetCgroupProcess(pid int) (string, error) { - return "", errors.New("not implemented for windows") -} diff --git a/vendor/github.com/containers/podman/v4/LICENSE b/vendor/github.com/containers/podman/v5/LICENSE similarity index 100% rename from vendor/github.com/containers/podman/v4/LICENSE rename to vendor/github.com/containers/podman/v5/LICENSE diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/parse/filters.go b/vendor/github.com/containers/podman/v5/cmd/podman/parse/filters.go similarity index 76% rename from vendor/github.com/containers/podman/v4/cmd/podman/parse/filters.go rename to vendor/github.com/containers/podman/v5/cmd/podman/parse/filters.go index e4ab942af..af9157cde 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/parse/filters.go +++ b/vendor/github.com/containers/podman/v5/cmd/podman/parse/filters.go @@ -9,11 +9,11 @@ import ( func FilterArgumentsIntoFilters(filters []string) (url.Values, error) { parsedFilters := make(url.Values) for _, f := range filters { - t := strings.SplitN(f, "=", 2) - if len(t) < 2 { + fname, filter, hasFilter := strings.Cut(f, "=") + if !hasFilter { return parsedFilters, fmt.Errorf("filter input must be in the form of filter=value: %s is invalid", f) } - parsedFilters.Add(t[0], t[1]) + parsedFilters.Add(fname, filter) } return parsedFilters, nil } diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/parse/net.go b/vendor/github.com/containers/podman/v5/cmd/podman/parse/net.go similarity index 84% rename from vendor/github.com/containers/podman/v4/cmd/podman/parse/net.go rename to vendor/github.com/containers/podman/v5/cmd/podman/parse/net.go index b1331464a..9d6ae1947 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/parse/net.go +++ b/vendor/github.com/containers/podman/v5/cmd/podman/parse/net.go @@ -33,15 +33,15 @@ var ( // for add-host flag func ValidateExtraHost(val string) (string, error) { // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { + name, ip, hasIP := strings.Cut(val, ":") + if !hasIP || len(name) == 0 { return "", fmt.Errorf("bad format for add-host: %q", val) } - if arr[1] == etchosts.HostGateway { + if ip == etchosts.HostGateway { return val, nil } - if _, err := validateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + if _, err := validateIPAddress(ip); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", ip) } return val, nil } @@ -82,45 +82,37 @@ func GetAllLabels(labelFile, inputLabels []string) (map[string]string, error) { } } for _, label := range inputLabels { - split := strings.SplitN(label, "=", 2) - if split[0] == "" { + key, value, _ := strings.Cut(label, "=") + if key == "" { return nil, fmt.Errorf("invalid label format: %q", label) } - value := "" - if len(split) > 1 { - value = split[1] - } - labels[split[0]] = value + labels[key] = value } return labels, nil } func parseEnvOrLabel(env map[string]string, line, configType string) error { - data := strings.SplitN(line, "=", 2) + key, val, hasVal := strings.Cut(line, "=") // catch invalid variables such as "=" or "=A" - if data[0] == "" { + if key == "" { return fmt.Errorf("invalid environment variable: %q", line) } // trim the front of a variable, but nothing else - name := strings.TrimLeft(data[0], whiteSpaces) + name := 
strings.TrimLeft(key, whiteSpaces) if strings.ContainsAny(name, whiteSpaces) { return fmt.Errorf("name %q has white spaces, poorly formatted name", name) } - if len(data) > 1 { - env[name] = data[1] + if hasVal { + env[name] = val } else { - if strings.HasSuffix(name, "*") { - name = strings.TrimSuffix(name, "*") + if name, hasStar := strings.CutSuffix(name, "*"); hasStar { for _, e := range os.Environ() { - part := strings.SplitN(e, "=", 2) - if len(part) < 2 { - continue - } - if strings.HasPrefix(part[0], name) { - env[part[0]] = part[1] + envKey, envVal, hasEq := strings.Cut(e, "=") + if hasEq && strings.HasPrefix(envKey, name) { + env[envKey] = envVal } } } else if configType == ENVType { diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/parse/parse.go b/vendor/github.com/containers/podman/v5/cmd/podman/parse/parse.go similarity index 94% rename from vendor/github.com/containers/podman/v4/cmd/podman/parse/parse.go rename to vendor/github.com/containers/podman/v5/cmd/podman/parse/parse.go index 47db066d3..b018dad25 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/parse/parse.go +++ b/vendor/github.com/containers/podman/v5/cmd/podman/parse/parse.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package parse diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/parse/parse_windows.go b/vendor/github.com/containers/podman/v5/cmd/podman/parse/parse_windows.go similarity index 100% rename from vendor/github.com/containers/podman/v4/cmd/podman/parse/parse_windows.go rename to vendor/github.com/containers/podman/v5/cmd/podman/parse/parse_windows.go diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/registry/config.go b/vendor/github.com/containers/podman/v5/cmd/podman/registry/config.go similarity index 91% rename from vendor/github.com/containers/podman/v4/cmd/podman/registry/config.go rename to vendor/github.com/containers/podman/v5/cmd/podman/registry/config.go index 1f6f6f566..9b52d65bc 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/registry/config.go +++ b/vendor/github.com/containers/podman/v5/cmd/podman/registry/config.go @@ -8,9 +8,9 @@ import ( "sync" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/util" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -72,7 +72,7 @@ func containersConfModules() ([]string, error) { fs.ParseErrorsWhitelist.UnknownFlags = true fs.Usage = func() {} fs.SetInterspersed(false) - fs.StringSliceVar(&modules, "module", nil, "") + fs.StringArrayVar(&modules, "module", nil, "") fs.BoolP("help", "h", false, "") // Need a fake help flag to avoid the `pflag: help requested` error return modules, fs.Parse(os.Args[index:]) } @@ -134,7 +134,7 @@ func setXdgDirs() error { // Set up XDG_RUNTIME_DIR if _, found := os.LookupEnv("XDG_RUNTIME_DIR"); !found { - dir, err := util.GetRuntimeDir() + dir, err := util.GetRootlessRuntimeDir() if err != nil { return err } @@ -166,3 +166,19 @@ func setXdgDirs() error { } return nil } + +func RetryDefault() uint { + if IsRemote() { + return 0 + } + + return PodmanConfig().ContainersConfDefaultsRO.Engine.Retry +} + +func RetryDelayDefault() string { + if IsRemote() { + return "" + } + + return PodmanConfig().ContainersConfDefaultsRO.Engine.RetryDelay +} diff --git 
a/vendor/github.com/containers/podman/v4/cmd/podman/registry/config_abi.go b/vendor/github.com/containers/podman/v5/cmd/podman/registry/config_abi.go similarity index 80% rename from vendor/github.com/containers/podman/v4/cmd/podman/registry/config_abi.go rename to vendor/github.com/containers/podman/v5/cmd/podman/registry/config_abi.go index 7babfa169..cdac89f38 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/registry/config_abi.go +++ b/vendor/github.com/containers/podman/v5/cmd/podman/registry/config_abi.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package registry diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/registry/config_tunnel.go b/vendor/github.com/containers/podman/v5/cmd/podman/registry/config_tunnel.go similarity index 81% rename from vendor/github.com/containers/podman/v4/cmd/podman/registry/config_tunnel.go rename to vendor/github.com/containers/podman/v5/cmd/podman/registry/config_tunnel.go index dfdbab8f8..9b80fb24c 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/registry/config_tunnel.go +++ b/vendor/github.com/containers/podman/v5/cmd/podman/registry/config_tunnel.go @@ -1,5 +1,4 @@ //go:build remote -// +build remote package registry diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/registry/json.go b/vendor/github.com/containers/podman/v5/cmd/podman/registry/json.go similarity index 100% rename from vendor/github.com/containers/podman/v4/cmd/podman/registry/json.go rename to vendor/github.com/containers/podman/v5/cmd/podman/registry/json.go diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/registry/registry.go b/vendor/github.com/containers/podman/v5/cmd/podman/registry/registry.go similarity index 92% rename from vendor/github.com/containers/podman/v4/cmd/podman/registry/registry.go rename to vendor/github.com/containers/podman/v5/cmd/podman/registry/registry.go index 867d145db..3d7f93867 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/registry/registry.go +++ b/vendor/github.com/containers/podman/v5/cmd/podman/registry/registry.go @@ -4,10 +4,10 @@ import ( "context" "path/filepath" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/infra" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/infra" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/util" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -107,7 +107,7 @@ func GetContext() context.Context { func DefaultAPIAddress() string { if rootless.IsRootless() { - xdg, err := util.GetRuntimeDir() + xdg, err := util.GetRootlessRuntimeDir() if err != nil { logrus.Warnf("Failed to get rootless runtime dir for DefaultAPIAddress: %s", err) return DefaultRootAPIAddress diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/registry/registry_common.go b/vendor/github.com/containers/podman/v5/cmd/podman/registry/registry_common.go similarity index 88% rename from vendor/github.com/containers/podman/v4/cmd/podman/registry/registry_common.go rename to vendor/github.com/containers/podman/v5/cmd/podman/registry/registry_common.go index 2b0cdb16f..75a92cd93 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/registry/registry_common.go +++ b/vendor/github.com/containers/podman/v5/cmd/podman/registry/registry_common.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd 
package registry diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/registry/registry_freebsd.go b/vendor/github.com/containers/podman/v5/cmd/podman/registry/registry_freebsd.go similarity index 100% rename from vendor/github.com/containers/podman/v4/cmd/podman/registry/registry_freebsd.go rename to vendor/github.com/containers/podman/v5/cmd/podman/registry/registry_freebsd.go diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/registry/remote.go b/vendor/github.com/containers/podman/v5/cmd/podman/registry/remote.go similarity index 96% rename from vendor/github.com/containers/podman/v4/cmd/podman/registry/remote.go rename to vendor/github.com/containers/podman/v5/cmd/podman/registry/remote.go index 7427becca..0000722f9 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/registry/remote.go +++ b/vendor/github.com/containers/podman/v5/cmd/podman/registry/remote.go @@ -6,7 +6,7 @@ import ( "strings" "sync" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" "github.com/spf13/pflag" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/boltdb_state.go b/vendor/github.com/containers/podman/v5/libpod/boltdb_state.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/boltdb_state.go rename to vendor/github.com/containers/podman/v5/libpod/boltdb_state.go index a2740b289..44428df08 100644 --- a/vendor/github.com/containers/podman/v4/libpod/boltdb_state.go +++ b/vendor/github.com/containers/podman/v5/libpod/boltdb_state.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -7,14 +6,16 @@ import ( "bytes" "errors" "fmt" + "io/fs" "net" + "os" "strconv" "strings" "sync" "time" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" ) @@ -79,6 +80,19 @@ func NewBoltState(path string, runtime *Runtime) (State, error) { logrus.Debugf("Initializing boltdb state at %s", path) + // BoltDB is deprecated and, as of Podman 5.0, we no longer allow the + // creation of new Bolt states. + // If the DB does not already exist, error out. + // To continue testing in CI, allow creation iff an undocumented env + // var is set. 
+ if os.Getenv("CI_DESIRED_DATABASE") != "boltdb" { + if _, err := os.Stat(path); err != nil && errors.Is(err, fs.ErrNotExist) { + return nil, fmt.Errorf("the BoltDB backend has been deprecated, no new BoltDB databases can be created: %w", define.ErrInvalidArg) + } + } else { + logrus.Debugf("Allowing deprecated database backend due to CI_DESIRED_DATABASE.") + } + db, err := bolt.Open(path, 0600, nil) if err != nil { return nil, fmt.Errorf("opening database %s: %w", path, err) diff --git a/vendor/github.com/containers/podman/v4/libpod/boltdb_state_internal.go b/vendor/github.com/containers/podman/v5/libpod/boltdb_state_internal.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/boltdb_state_internal.go rename to vendor/github.com/containers/podman/v5/libpod/boltdb_state_internal.go index 75888410a..3a2c66136 100644 --- a/vendor/github.com/containers/podman/v4/libpod/boltdb_state_internal.go +++ b/vendor/github.com/containers/podman/v5/libpod/boltdb_state_internal.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -12,8 +11,7 @@ import ( "runtime" "strings" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" "github.com/containers/storage" "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" @@ -103,7 +101,7 @@ type dbConfigValidation struct { // configuration of the runtime opening it // If there is no runtime configuration loaded, load our own func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error { - storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) + storeOpts, err := storage.DefaultStoreOptions() if err != nil { return err } diff --git a/vendor/github.com/containers/podman/v4/libpod/container.go b/vendor/github.com/containers/podman/v5/libpod/container.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/container.go rename to vendor/github.com/containers/podman/v5/libpod/container.go index 9bd802836..45ec75a2f 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container.go +++ b/vendor/github.com/containers/podman/v5/libpod/container.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -13,14 +12,12 @@ import ( "strings" "time" - types040 "github.com/containernetworking/cni/pkg/types/040" - "github.com/containers/common/libnetwork/cni" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/manifest" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/lock" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/lock" "github.com/containers/storage" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" @@ -176,13 +173,6 @@ type ContainerState struct { LegacyExecSessions map[string]*legacyExecSession `json:"execSessions,omitempty"` // NetNS is the path or name of the NetNS NetNS string `json:"netns,omitempty"` - // NetworkStatusOld contains the configuration results for all networks - // the pod is attached to. Only populated if we created a network - // namespace for the container, and the network namespace is currently - // active. - // These are DEPRECATED and will be removed in a future release. - // This field is only used for backwarts compatibility. 
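
The new BoltDB gate above stats the database path and refuses to create a file that does not already exist, unless the undocumented CI_DESIRED_DATABASE escape hatch is set. A standalone sketch of the same logic, with an illustrative path:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// allowBoltState refuses to create a new BoltDB file unless the CI escape
// hatch is set; an already-existing database may still be opened.
func allowBoltState(path string) error {
	if os.Getenv("CI_DESIRED_DATABASE") == "boltdb" {
		return nil // CI explicitly asked for the deprecated backend
	}
	if _, err := os.Stat(path); errors.Is(err, fs.ErrNotExist) {
		return errors.New("the BoltDB backend has been deprecated, no new BoltDB databases can be created")
	}
	return nil
}

func main() {
	fmt.Println(allowBoltState("/tmp/bolt_state.db"))
}
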
- NetworkStatusOld []*types040.Result `json:"networkResults,omitempty"` // NetworkStatus contains the network Status for all networks // the container is attached to. Only populated if we created a network // namespace for the container, and the network namespace is currently @@ -1374,39 +1364,10 @@ func (c *Container) GetNetworkStatus() (map[string]types.StatusBlock, error) { return c.getNetworkStatus(), nil } -// getNetworkStatus get the current network status from the state. If the container -// still uses the old network status it is converted to the new format. This function +// getNetworkStatus get the current network status from the state. This function // should be used instead of reading c.state.NetworkStatus directly. func (c *Container) getNetworkStatus() map[string]types.StatusBlock { - if c.state.NetworkStatus != nil { - return c.state.NetworkStatus - } - if c.state.NetworkStatusOld != nil { - networks, err := c.networks() - if err != nil { - return nil - } - if len(networks) != len(c.state.NetworkStatusOld) { - return nil - } - result := make(map[string]types.StatusBlock, len(c.state.NetworkStatusOld)) - i := 0 - // Note: NetworkStatusOld does not contain the network names so we get them extra - // We cannot guarantee the same order but after a state refresh it should work - for netName := range networks { - status, err := cni.CNIResultToStatus(c.state.NetworkStatusOld[i]) - if err != nil { - return nil - } - result[netName] = status - i++ - } - c.state.NetworkStatus = result - _ = c.save() - - return result - } - return nil + return c.state.NetworkStatus } func (c *Container) NamespaceMode(ns spec.LinuxNamespaceType, ctrSpec *spec.Spec) string { diff --git a/vendor/github.com/containers/podman/v4/libpod/container_api.go b/vendor/github.com/containers/podman/v5/libpod/container_api.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/container_api.go rename to vendor/github.com/containers/podman/v5/libpod/container_api.go index b0e06566d..b48cf8482 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_api.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_api.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -14,9 +13,9 @@ import ( "time" "github.com/containers/common/pkg/resize" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/signal" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/signal" "github.com/containers/storage/pkg/archive" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" @@ -173,7 +172,7 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachSt // Attach to the container before starting it go func() { // Start resizing - if c.LogDriver() != define.PassthroughLogging { + if c.LogDriver() != define.PassthroughLogging && c.LogDriver() != define.PassthroughTTYLogging { registerResizeFunc(resize, c.bundlePath()) } @@ -305,6 +304,9 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <- if c.LogDriver() == define.PassthroughLogging { return fmt.Errorf("this container is using the 'passthrough' log driver, cannot attach: %w", define.ErrNoLogs) } + if c.LogDriver() == define.PassthroughTTYLogging { + return fmt.Errorf("this container is using the 'passthrough-tty' log driver, cannot attach: %w", define.ErrNoLogs) + } if 
!c.batched { c.lock.Lock() if err := c.syncContainer(); err != nil { @@ -337,7 +339,7 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <- } // Start resizing - if c.LogDriver() != define.PassthroughLogging { + if c.LogDriver() != define.PassthroughLogging && c.LogDriver() != define.PassthroughTTYLogging { registerResizeFunc(resize, c.bundlePath()) } diff --git a/vendor/github.com/containers/podman/v4/libpod/container_commit.go b/vendor/github.com/containers/podman/v5/libpod/container_commit.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/container_commit.go rename to vendor/github.com/containers/podman/v5/libpod/container_commit.go index 5817f3a18..832b1499a 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_commit.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_commit.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -13,8 +12,8 @@ import ( "github.com/containers/common/libimage" is "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" "github.com/sirupsen/logrus" ) @@ -98,8 +97,8 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai // Should we store the ENV we actually want in the spec separately? if c.config.Spec.Process != nil { for _, e := range c.config.Spec.Process.Env { - splitEnv := strings.SplitN(e, "=", 2) - importBuilder.SetEnv(splitEnv[0], splitEnv[1]) + key, val, _ := strings.Cut(e, "=") + importBuilder.SetEnv(key, val) } } // Expose ports diff --git a/vendor/github.com/containers/podman/v4/libpod/container_config.go b/vendor/github.com/containers/podman/v5/libpod/container_config.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/container_config.go rename to vendor/github.com/containers/podman/v5/libpod/container_config.go index aefce6721..262a4befa 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_config.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_config.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -10,9 +9,9 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/manifest" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/namespaces" - "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/namespaces" + "github.com/containers/podman/v5/pkg/specgen" "github.com/containers/storage" spec "github.com/opencontainers/runtime-spec/specs-go" ) @@ -161,7 +160,7 @@ type ContainerRootFSConfig struct { // pre-1.8, which was used in very old Podman versions to determine how // image volumes were handled in Libpod (support for these eventually // moved out of Libpod into pkg/specgen). - // Please DO NOT re-use the `imageVolumes` name in container JSON again. + // Please DO NOT reuse the `imageVolumes` name in container JSON again. ImageVolumes []*ContainerImageVolume `json:"ctrImageVolumes,omitempty"` // CreateWorkingDir indicates that Libpod should create the container's // working directory if it does not exist. 
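
Several hunks in this patch (filters.go, net.go, and container_commit.go above) replace strings.SplitN length checks with strings.Cut, and the "NAME*" wildcard handling with strings.CutSuffix. A short demonstration of both idioms (Cut needs Go 1.18+, CutSuffix 1.20+):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Old style: slice plus length check.
	parts := strings.SplitN("FOO=bar", "=", 2)
	fmt.Println(len(parts) == 2, parts[0], parts[1])

	// New style: explicit "separator found" flag, no index arithmetic.
	key, val, ok := strings.Cut("FOO=bar", "=")
	fmt.Println(ok, key, val)

	// CutSuffix drives the "NAME*" wildcard in parseEnvOrLabel: strip the
	// star, then copy host variables whose names match the prefix.
	if prefix, hasStar := strings.CutSuffix("PATH*", "*"); hasStar {
		for _, e := range os.Environ() {
			if k, v, found := strings.Cut(e, "="); found && strings.HasPrefix(k, prefix) {
				fmt.Printf("%s=%s\n", k, v)
			}
		}
	}
}
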
Some OCI runtimes do this by @@ -291,6 +290,12 @@ type ContainerNetworkConfig struct { // bind-mounted inside the container. // Conflicts with HostAdd. UseImageHosts bool + // BaseHostsFile is the path to a hosts file, the entries from this file + // are added to the containers hosts file. As special value "image" is + // allowed which uses the /etc/hosts file from within the image and "none" + // which uses no base file at all. If it is empty we should default + // to the base_hosts_file configuration in containers.conf. + BaseHostsFile string `json:"baseHostsFile,omitempty"` // Hosts to add in container // Will be appended to host's host file HostAdd []string `json:"hostsAdd,omitempty"` @@ -416,6 +421,9 @@ type ContainerMiscConfig struct { // to 0, 1, 2) that will be passed to the executed process. The total FDs // passed will be 3 + PreserveFDs. PreserveFDs uint `json:"preserveFds,omitempty"` + // PreserveFD is a list of additional file descriptors (in addition + // to 0, 1, 2) that will be passed to the executed process. + PreserveFD []uint `json:"preserveFd,omitempty"` // Timezone is the timezone inside the container. // Local means it has the same timezone as the host machine Timezone string `json:"timezone,omitempty"` diff --git a/vendor/github.com/containers/podman/v4/libpod/container_copy_common.go b/vendor/github.com/containers/podman/v5/libpod/container_copy_common.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/container_copy_common.go rename to vendor/github.com/containers/podman/v5/libpod/container_copy_common.go index c8156a630..e32f79dc0 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_copy_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_copy_common.go @@ -1,6 +1,4 @@ //go:build !remote && (linux || freebsd) -// +build !remote -// +build linux freebsd package libpod @@ -13,8 +11,8 @@ import ( buildahCopiah "github.com/containers/buildah/copier" "github.com/containers/buildah/pkg/chrootuser" "github.com/containers/buildah/util" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" "github.com/opencontainers/runtime-spec/specs-go" diff --git a/vendor/github.com/containers/podman/v4/libpod/container_copy_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/container_copy_freebsd.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/container_copy_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/container_copy_freebsd.go index d19acc23a..9697222d3 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_copy_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_copy_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/container_copy_linux.go b/vendor/github.com/containers/podman/v5/libpod/container_copy_linux.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/container_copy_linux.go rename to vendor/github.com/containers/podman/v5/libpod/container_copy_linux.go index 9096be810..149da9b38 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_copy_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_copy_linux.go @@ -1,5 +1,4 @@ //go:build !remote 
-// +build !remote package libpod @@ -8,7 +7,7 @@ import ( "os" "runtime" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/container_exec.go b/vendor/github.com/containers/podman/v5/libpod/container_exec.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/container_exec.go rename to vendor/github.com/containers/podman/v5/libpod/container_exec.go index df52993c1..42f6eae9e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_exec.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_exec.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -15,8 +14,8 @@ import ( "github.com/containers/common/pkg/resize" "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" "github.com/containers/storage/pkg/stringid" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -66,6 +65,9 @@ type ExecConfig struct { // given is the number that will be passed into the exec session, // starting at 3. PreserveFDs uint `json:"preserveFds,omitempty"` + // PreserveFD is a list of additional file descriptors (in addition + // to 0, 1, 2) that will be passed to the executed process. + PreserveFD []uint `json:"preserveFd,omitempty"` // ExitCommand is the exec session's exit command. // This command will be executed when the exec session exits. // If unset, no command will be executed. @@ -820,7 +822,7 @@ func (c *Container) exec(config *ExecConfig, streams *define.AttachStreams, resi if err != nil { return -1, fmt.Errorf("retrieving exec session %s exit code: %w", sessionID, err) } - return diedEvent.ContainerExitCode, nil + return *diedEvent.ContainerExitCode, nil } return -1, err } @@ -888,6 +890,13 @@ func (c *Container) execExitFileDir(sessionID string) string { return filepath.Join(c.execBundlePath(sessionID), "exit") } +// execPersistDir gets the path to the container's persist directory +// The persist directory container the exit file and oom file (if oomkilled) +// of a container +func (c *Container) execPersistDir(sessionID string) string { + return filepath.Join(c.execBundlePath(sessionID), "persist", c.ID()) +} + // execOCILog returns the file path for the exec sessions oci log func (c *Container) execOCILog(sessionID string) string { if !c.ociRuntime.SupportsJSONErrors() { @@ -915,6 +924,9 @@ func (c *Container) createExecBundle(sessionID string) (retErr error) { return fmt.Errorf("creating OCI runtime exit file path %s: %w", c.execExitFileDir(sessionID), err) } } + if err := os.MkdirAll(c.execPersistDir(sessionID), execDirPermission); err != nil { + return fmt.Errorf("creating OCI runtime persist directory path %s: %w", c.execPersistDir(sessionID), err) + } return nil } @@ -1092,6 +1104,7 @@ func prepareForExec(c *Container, session *ExecSession) (*ExecOptions, error) { opts.Cwd = session.Config.WorkDir opts.User = session.Config.User opts.PreserveFDs = session.Config.PreserveFDs + opts.PreserveFD = session.Config.PreserveFD opts.DetachKeys = session.Config.DetachKeys opts.ExitCommand = session.Config.ExitCommand opts.ExitCommandDelay = session.Config.ExitCommandDelay diff --git a/vendor/github.com/containers/podman/v4/libpod/container_freebsd.go 
b/vendor/github.com/containers/podman/v5/libpod/container_freebsd.go similarity index 92% rename from vendor/github.com/containers/podman/v4/libpod/container_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/container_freebsd.go index 7164eefd8..51d8e9bf3 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/container_graph.go b/vendor/github.com/containers/podman/v5/libpod/container_graph.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/container_graph.go rename to vendor/github.com/containers/podman/v5/libpod/container_graph.go index 6fae1c29a..4f07043bf 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_graph.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_graph.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -8,7 +7,7 @@ import ( "fmt" "strings" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/container_inspect.go b/vendor/github.com/containers/podman/v5/libpod/container_inspect.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/container_inspect.go rename to vendor/github.com/containers/podman/v5/libpod/container_inspect.go index d823c7749..da858e820 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_inspect.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_inspect.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -8,9 +7,10 @@ import ( "fmt" "strings" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/driver" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/driver" + "github.com/containers/podman/v5/pkg/signal" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage/types" "github.com/docker/go-units" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -54,8 +54,8 @@ func (c *Container) volumesFrom() ([]string, error) { if err != nil { return nil, err } - if ctrs, ok := ctrSpec.Annotations[define.InspectAnnotationVolumesFrom]; ok { - return strings.Split(ctrs, ","), nil + if ctrs, ok := ctrSpec.Annotations[define.VolumesFromAnnotation]; ok { + return strings.Split(ctrs, ";"), nil } return nil, nil } @@ -190,15 +190,20 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver data.OCIConfigPath = c.state.ConfigPath } - if c.config.HealthCheckConfig != nil { + // Check if healthcheck is not nil and --no-healthcheck option is not set. + // If --no-healthcheck is set Test will be always set to `[NONE]`, so the + // inspect status should be set to nil. 
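
The healthcheck comment in the inspect hunk below refers to the sentinel Test value written by --no-healthcheck. A tiny sketch of that "[NONE]" check in isolation:

package main

import "fmt"

// hasNoHealthcheck mirrors the condition used in the inspect code: the
// --no-healthcheck flag is represented as a Test command of exactly ["NONE"].
func hasNoHealthcheck(test []string) bool {
	return len(test) == 1 && test[0] == "NONE"
}

func main() {
	fmt.Println(hasNoHealthcheck([]string{"NONE"}))              // true
	fmt.Println(hasNoHealthcheck([]string{"CMD-SHELL", "true"})) // false
}
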
+ if c.config.HealthCheckConfig != nil && !(len(c.config.HealthCheckConfig.Test) == 1 && c.config.HealthCheckConfig.Test[0] == "NONE") { // This container has a healthcheck defined in it; we need to add its state healthCheckState, err := c.getHealthCheckLog() if err != nil { // An error here is not considered fatal; no health state will be displayed logrus.Error(err) } else { - data.State.Health = healthCheckState + data.State.Health = &healthCheckState } + } else { + data.State.Health = nil } networkConfig, err := c.getContainerNetworkInfo() @@ -316,7 +321,7 @@ func (c *Container) GetSecurityOptions() []string { if apparmor, ok := ctrSpec.Annotations[define.InspectAnnotationApparmor]; ok { SecurityOpt = append(SecurityOpt, fmt.Sprintf("apparmor=%s", apparmor)) } - if c.config.Spec.Linux.MaskedPaths == nil { + if c.config.Spec != nil && c.config.Spec.Linux != nil && c.config.Spec.Linux.MaskedPaths == nil { SecurityOpt = append(SecurityOpt, "unmask=all") } @@ -384,7 +389,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp // Leave empty if not explicitly overwritten by user if len(c.config.Entrypoint) != 0 { - ctrConfig.Entrypoint = strings.Join(c.config.Entrypoint, " ") + ctrConfig.Entrypoint = c.config.Entrypoint } if len(c.config.Labels) != 0 { @@ -400,8 +405,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.Insp ctrConfig.Annotations[k] = v } } - - ctrConfig.StopSignal = c.config.StopSignal + ctrConfig.StopSignal = signal.ToDockerFormat(c.config.StopSignal) // TODO: should JSON deep copy this to ensure internal pointers don't // leak. ctrConfig.Healthcheck = c.config.HealthCheckConfig @@ -506,8 +510,8 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named if ctrSpec.Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue { hostConfig.AutoRemove = true } - if ctrs, ok := ctrSpec.Annotations[define.InspectAnnotationVolumesFrom]; ok { - hostConfig.VolumesFrom = strings.Split(ctrs, ",") + if ctrs, ok := ctrSpec.Annotations[define.VolumesFromAnnotation]; ok { + hostConfig.VolumesFrom = strings.Split(ctrs, ";") } if ctrSpec.Annotations[define.InspectAnnotationPrivileged] == define.InspectResponseTrue { hostConfig.Privileged = true diff --git a/vendor/github.com/containers/podman/v4/libpod/container_inspect_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/container_inspect_freebsd.go similarity index 87% rename from vendor/github.com/containers/podman/v4/libpod/container_inspect_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/container_inspect_freebsd.go index b0f8bae54..a913b7e7d 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_inspect_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_inspect_freebsd.go @@ -1,10 +1,9 @@ //go:build !remote -// +build !remote package libpod import ( - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" spec "github.com/opencontainers/runtime-spec/specs-go" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/container_inspect_linux.go b/vendor/github.com/containers/podman/v5/libpod/container_inspect_linux.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/container_inspect_linux.go rename to vendor/github.com/containers/podman/v5/libpod/container_inspect_linux.go index 402634284..909be3d31 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_inspect_linux.go +++ 
b/vendor/github.com/containers/podman/v5/libpod/container_inspect_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -9,8 +8,8 @@ import ( "strings" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/util" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/runtime-tools/validate/capabilities" diff --git a/vendor/github.com/containers/podman/v4/libpod/container_internal.go b/vendor/github.com/containers/podman/v5/libpod/container_internal.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/container_internal.go rename to vendor/github.com/containers/podman/v5/libpod/container_internal.go index 48c5c970c..a7d07da53 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_internal.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_internal.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -26,16 +25,17 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/hooks" "github.com/containers/common/pkg/hooks/exec" + "github.com/containers/common/pkg/timezone" cutil "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/libpod/shutdown" - "github.com/containers/podman/v4/pkg/ctime" - "github.com/containers/podman/v4/pkg/lookup" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/selinux" - "github.com/containers/podman/v4/pkg/systemd/notifyproxy" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/libpod/shutdown" + "github.com/containers/podman/v5/pkg/ctime" + "github.com/containers/podman/v5/pkg/lookup" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/selinux" + "github.com/containers/podman/v5/pkg/systemd/notifyproxy" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idmap" @@ -48,6 +48,7 @@ import ( "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) @@ -145,6 +146,10 @@ func (c *Container) exitFilePath() (string, error) { return c.ociRuntime.ExitFilePath(c) } +func (c *Container) oomFilePath() (string, error) { + return c.ociRuntime.OOMFilePath(c) +} + // Wait for the container's exit file to appear. // When it does, update our state based on it. func (c *Container) waitForExitFileAndSync() error { @@ -181,6 +186,7 @@ func (c *Container) waitForExitFileAndSync() error { // Handle the container exit file. // The exit file is used to supply container exit time and exit code. // This assumes the exit file already exists. +// Also check for an oom file to determine if the container was oom killed or not. 
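The `oomFilePath` helper introduced above replaces a hard-coded `filepath.Join(c.bundlePath(), "oom")` lookup with a path supplied by the OCI runtime interface. The detection itself remains a plain sentinel-file existence check; a self-contained sketch of that pattern, where the directory layout and helper name are invented for illustration and are not libpod API:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// wasOOMKilled reports whether the runtime dropped an "oom" sentinel file
// into the container's persist directory; absence simply means no OOM kill.
func wasOOMKilled(persistDir, containerID string) (bool, error) {
	_, err := os.Stat(filepath.Join(persistDir, containerID, "oom"))
	if err == nil {
		return true, nil
	}
	if errors.Is(err, fs.ErrNotExist) {
		return false, nil
	}
	return false, err // a real stat failure should be surfaced, not swallowed
}

func main() {
	oomKilled, err := wasOOMKilled("/run/example/persist", "abc123")
	fmt.Println(oomKilled, err)
}
```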
func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error { c.state.FinishedTime = ctime.Created(fi) statusCodeStr, err := os.ReadFile(exitFile) @@ -194,7 +200,10 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error { } c.state.ExitCode = int32(statusCode) - oomFilePath := filepath.Join(c.bundlePath(), "oom") + oomFilePath, err := c.oomFilePath() + if err != nil { + return err + } if _, err = os.Stat(oomFilePath); err == nil { c.state.OOMKilled = true } @@ -306,9 +315,13 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err return false, err } - // set up slirp4netns again because slirp4netns will die when conmon exits - if err := c.setupRootlessNetwork(); err != nil { - return false, err + // only do this if the container is not in a userns; if it is, cleanupNetwork() + // was called above and a proper network setup is needed as part of the init() below. + if !c.config.PostConfigureNetNS { + // set up slirp4netns again because slirp4netns will die when conmon exits + if err := c.setupRootlessNetwork(); err != nil { + return false, err + } } if c.state.State == define.ContainerStateStopped { @@ -535,11 +548,11 @@ func (c *Container) setupStorage(ctx context.Context) error { c.state.RunDir = containerInfo.RunDir if len(c.config.IDMappings.UIDMap) != 0 || len(c.config.IDMappings.GIDMap) != 0 { - if err := os.Chown(containerInfo.RunDir, c.RootUID(), c.RootGID()); err != nil { + if err := idtools.SafeChown(containerInfo.RunDir, c.RootUID(), c.RootGID()); err != nil { return err } - if err := os.Chown(containerInfo.Dir, c.RootUID(), c.RootGID()); err != nil { + if err := idtools.SafeChown(containerInfo.Dir, c.RootUID(), c.RootGID()); err != nil { return err } } @@ -622,7 +635,19 @@ func resetContainerState(state *ContainerState) { state.ConmonPID = 0 state.Mountpoint = "" state.Mounted = false - if state.State != define.ContainerStateExited { + // Reset state. + // Almost all states are reset to either Configured or Exited, + // except ContainerStateRemoving which is preserved. + switch state.State { + case define.ContainerStateStopped, define.ContainerStateExited, define.ContainerStateStopping, define.ContainerStateRunning, define.ContainerStatePaused: + // All containers that ran at any point during the last boot + // must be placed in the Exited state. + state.State = define.ContainerStateExited + case define.ContainerStateConfigured, define.ContainerStateCreated: + state.State = define.ContainerStateConfigured + case define.ContainerStateUnknown: + // Something really strange must have happened to get us here. + // Reset to configured, maybe the reboot cleared things up? state.State = define.ContainerStateConfigured } state.ExecSessions = make(map[string]*ExecSession) @@ -643,7 +668,6 @@ func resetContainerState(state *ContainerState) { state.StartupHCFailureCount = 0 state.NetNS = "" state.NetworkStatus = nil - state.NetworkStatusOld = nil } // Refresh refreshes the container's state after a restart. @@ -681,7 +705,7 @@ func (c *Container) refresh() error { if err := os.MkdirAll(root, 0755); err != nil { return fmt.Errorf("creating userNS tmpdir for container %s: %w", c.ID(), err) } - if err := os.Chown(root, c.RootUID(), c.RootGID()); err != nil { + if err := idtools.SafeChown(root, c.RootUID(), c.RootGID()); err != nil { return err } } @@ -694,7 +718,6 @@ func (c *Container) refresh() error { c.lock = lock c.state.NetworkStatus = nil - c.state.NetworkStatusOld = nil // Rewrite the config if necessary.
// Podman 4.0 uses a new port format in the config. @@ -744,11 +767,6 @@ func (c *Container) removeConmonFiles() error { return fmt.Errorf("removing container %s winsz file: %w", c.ID(), err) } - oomFile := filepath.Join(c.bundlePath(), "oom") - if err := os.Remove(oomFile); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("removing container %s OOM file: %w", c.ID(), err) - } - // Remove the exit file so we don't leak memory in tmpfs exitFile, err := c.exitFilePath() if err != nil { @@ -758,6 +776,15 @@ func (c *Container) removeConmonFiles() error { return fmt.Errorf("removing container %s exit file: %w", c.ID(), err) } + // Remove the oom file + oomFile, err := c.oomFilePath() + if err != nil { + return err + } + if err := os.Remove(oomFile); err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("removing container %s oom file: %w", c.ID(), err) + } + return nil } @@ -1578,7 +1605,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) { if err := c.mountSHM(shmOptions); err != nil { return "", err } - if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil { + if err := idtools.SafeChown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil { return "", fmt.Errorf("failed to chown %s: %w", c.config.ShmDir, err) } defer func() { @@ -1708,45 +1735,18 @@ func (c *Container) mountStorage() (_ string, deferredErr error) { } tz := c.Timezone() - if tz != "" { - timezonePath := filepath.Join("/usr/share/zoneinfo", tz) - if tz == "local" { - timezonePath, err = filepath.EvalSymlinks("/etc/localtime") - if err != nil { - return "", fmt.Errorf("finding local timezone for container %s: %w", c.ID(), err) - } - } - // make sure to remove any existing localtime file in the container to not create invalid links - err = unix.Unlinkat(etcInTheContainerFd, "localtime", 0) - if err != nil && !errors.Is(err, fs.ErrNotExist) { - return "", fmt.Errorf("removing /etc/localtime: %w", err) - } - - hostPath, err := securejoin.SecureJoin(mountPoint, timezonePath) - if err != nil { - return "", fmt.Errorf("resolve zoneinfo path in the container: %w", err) + localTimePath, err := timezone.ConfigureContainerTimeZone(tz, c.state.RunDir, mountPoint, etcInTheContainerPath, c.ID()) + if err != nil { + return "", fmt.Errorf("configuring timezone for container %s: %w", c.ID(), err) + } + if localTimePath != "" { + if err := c.relabel(localTimePath, c.config.MountLabel, false); err != nil { + return "", err } - - _, err = os.Stat(hostPath) - if err != nil { - // file does not exists which means tzdata is not installed in the container, just create /etc/locatime which a copy from the host - logrus.Debugf("Timezone %s does not exist in the container, create our own copy from the host", timezonePath) - localtimePath, err := c.copyTimezoneFile(timezonePath) - if err != nil { - return "", fmt.Errorf("setting timezone for container %s: %w", c.ID(), err) - } - if c.state.BindMounts == nil { - c.state.BindMounts = make(map[string]string) - } - c.state.BindMounts["/etc/localtime"] = localtimePath - } else { - // file exists lets just symlink according to localtime(5) - logrus.Debugf("Create locatime symlink for %s", timezonePath) - err = unix.Symlinkat(".."+timezonePath, etcInTheContainerFd, "localtime") - if err != nil { - return "", fmt.Errorf("creating /etc/localtime symlink: %w", err) - } + if c.state.BindMounts == nil { + c.state.BindMounts = make(map[string]string) } + c.state.BindMounts["/etc/localtime"] = localTimePath } // Request a mount of all named volumes @@ 
-1797,7 +1797,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string) return nil, err } _, hasNoCopy := vol.config.Options["nocopy"] - if vol.state.NeedsCopyUp && !cutil.StringInSlice("nocopy", v.Options) && !hasNoCopy { + if vol.state.NeedsCopyUp && !slices.Contains(v.Options, "nocopy") && !hasNoCopy { logrus.Debugf("Copying up contents from container %s to volume %s", c.ID(), vol.Name()) srcDir, err := securejoin.SecureJoin(mountpoint, v.Dest) @@ -2325,7 +2325,7 @@ func (c *Container) mount() (string, error) { if err != nil { return "", fmt.Errorf("resolving storage path for container %s: %w", c.ID(), err) } - if err := os.Chown(mountPoint, c.RootUID(), c.RootGID()); err != nil { + if err := idtools.SafeChown(mountPoint, c.RootUID(), c.RootGID()); err != nil { return "", fmt.Errorf("cannot chown %s to %d:%d: %w", mountPoint, c.RootUID(), c.RootGID(), err) } return mountPoint, nil @@ -2508,13 +2508,13 @@ func (c *Container) extractSecretToCtrStorage(secr *ContainerSecret) error { if err != nil { return fmt.Errorf("unable to create %s: %w", secretFile, err) } - if err := os.Lchown(secretFile, int(hostUID), int(hostGID)); err != nil { + if err := idtools.SafeLchown(secretFile, int(hostUID), int(hostGID)); err != nil { return err } if err := os.Chmod(secretFile, os.FileMode(secr.Mode)); err != nil { return err } - if err := label.Relabel(secretFile, c.config.MountLabel, false); err != nil { + if err := c.relabel(secretFile, c.config.MountLabel, false); err != nil { return err } return nil diff --git a/vendor/github.com/containers/podman/v4/libpod/container_internal_common.go b/vendor/github.com/containers/podman/v5/libpod/container_internal_common.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/container_internal_common.go rename to vendor/github.com/containers/podman/v5/libpod/container_internal_common.go index cd19089e2..c6fa07e08 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_internal_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_internal_common.go @@ -1,6 +1,4 @@ //go:build !remote && (linux || freebsd) -// +build !remote -// +build linux freebsd package libpod @@ -34,28 +32,29 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/subscriptions" "github.com/containers/common/pkg/umask" - cutil "github.com/containers/common/pkg/util" is "github.com/containers/image/v5/storage" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/annotations" - "github.com/containers/podman/v4/pkg/checkpoint/crutils" - "github.com/containers/podman/v4/pkg/criu" - "github.com/containers/podman/v4/pkg/lookup" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/util" - "github.com/containers/podman/v4/version" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/annotations" + "github.com/containers/podman/v5/pkg/checkpoint/crutils" + "github.com/containers/podman/v5/pkg/criu" + "github.com/containers/podman/v5/pkg/lookup" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/util" + "github.com/containers/podman/v5/version" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/unshare" stypes 
"github.com/containers/storage/types" securejoin "github.com/cyphar/filepath-securejoin" - runcuser "github.com/opencontainers/runc/libcontainer/user" + runcuser "github.com/moby/sys/user" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "golang.org/x/sys/unix" cdi "tags.cncf.io/container-device-interface/pkg/cdi" ) @@ -86,7 +85,7 @@ func parseOptionIDs(ctrMappings []idtools.IDMap, option string) ([]idtools.IDMap if relative { found := false for _, m := range ctrMappings { - if v.ContainerID >= m.ContainerID && v.ContainerID < m.ContainerID+m.Size { + if v.HostID >= m.ContainerID && v.HostID < m.ContainerID+m.Size { v.HostID += m.HostID - m.ContainerID found = true break @@ -195,7 +194,7 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc overrides := c.getUserOverrides() execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, overrides) if err != nil { - if cutil.StringInSlice(c.config.User, c.config.HostUsers) { + if slices.Contains(c.config.HostUsers, c.config.User) { execUser, err = lookupHostUser(c.config.User) } if err != nil { @@ -366,7 +365,11 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc if err := c.relabel(m.Source, c.MountLabel(), label.IsShared(o)); err != nil { return nil, nil, err } - + case "no-dereference": + // crun calls the option `copy-symlink`. + // Podman decided for --no-dereference as many + // bin-utils tools (e..g, touch, chown, cp) do. + options = append(options, "copy-symlink") default: options = append(options, o) } @@ -605,9 +608,6 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc if err != nil { return nil, nil, err } - if err != nil { - return nil, nil, err - } for name, secr := range c.config.EnvSecrets { _, data, err := manager.LookupSecretData(secr.Name) if err != nil { @@ -633,14 +633,15 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc nofileSet := false nprocSet := false isRootless := rootless.IsRootless() - if isRootless { - if g.Config.Process != nil && g.Config.Process.OOMScoreAdj != nil { - var err error - *g.Config.Process.OOMScoreAdj, err = maybeClampOOMScoreAdj(*g.Config.Process.OOMScoreAdj) - if err != nil { - return nil, nil, err - } + isRunningInUserNs := unshare.IsRootless() + if isRunningInUserNs && g.Config.Process != nil && g.Config.Process.OOMScoreAdj != nil { + var err error + *g.Config.Process.OOMScoreAdj, err = maybeClampOOMScoreAdj(*g.Config.Process.OOMScoreAdj) + if err != nil { + return nil, nil, err } + } + if isRootless { for _, rlimit := range c.config.Spec.Process.Rlimits { if rlimit.Type == "RLIMIT_NOFILE" { nofileSet = true @@ -791,7 +792,7 @@ func (c *Container) resolveWorkDir() error { if err != nil { return fmt.Errorf("looking up %s inside of the container %s: %w", c.User(), c.ID(), err) } - if err := os.Chown(resolvedWorkdir, int(uid), int(gid)); err != nil { + if err := idtools.SafeChown(resolvedWorkdir, int(uid), int(gid)); err != nil { return fmt.Errorf("chowning container %s workdir to container root: %w", c.ID(), err) } @@ -865,7 +866,7 @@ func (c *Container) mountNotifySocket(g generate.Generator) error { return fmt.Errorf("unable to create notify %q dir: %w", notifyDir, err) } } - if err := label.Relabel(notifyDir, c.MountLabel(), true); err != nil { 
+ if err := c.relabel(notifyDir, c.MountLabel(), true); err != nil { return fmt.Errorf("relabel failed %q: %w", notifyDir, err) } logrus.Debugf("Add bindmount notify %q dir", notifyDir) @@ -1816,7 +1817,7 @@ func (c *Container) mountIntoRootDirs(mountName string, mountPath string) error // Make standard bind mounts to include in the container func (c *Container) makeBindMounts() error { - if err := os.Chown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil { + if err := idtools.SafeChown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil { return fmt.Errorf("cannot chown run directory: %w", err) } @@ -2263,7 +2264,14 @@ func (c *Container) addHosts() error { if err != nil { return fmt.Errorf("failed to get container ip host entries: %w", err) } - baseHostFile, err := etchosts.GetBaseHostFile(c.runtime.config.Containers.BaseHostsFile, c.state.Mountpoint) + + // Consider container level BaseHostsFile configuration first. + // If it is empty, fallback to containers.conf level configuration. + baseHostsFileConf := c.config.BaseHostsFile + if baseHostsFileConf == "" { + baseHostsFileConf = c.runtime.config.Containers.BaseHostsFile + } + baseHostFile, err := etchosts.GetBaseHostFile(baseHostsFileConf, c.state.Mountpoint) if err != nil { return err } @@ -2281,10 +2289,10 @@ func (c *Container) addHosts() error { // It will also add the path to the container bind mount map. // source is the path on the host, dest is the path in the container. func (c *Container) bindMountRootFile(source, dest string) error { - if err := os.Chown(source, c.RootUID(), c.RootGID()); err != nil { + if err := idtools.SafeChown(source, c.RootUID(), c.RootGID()); err != nil { return err } - if err := label.Relabel(source, c.MountLabel(), false); err != nil { + if err := c.relabel(source, c.MountLabel(), false); err != nil { return err } @@ -2486,7 +2494,7 @@ func (c *Container) setHomeEnvIfNeeded() error { overrides := c.getUserOverrides() execUser, err := lookup.GetUserGroupInfo(c.state.Mountpoint, c.config.User, overrides) if err != nil { - if cutil.StringInSlice(c.config.User, c.config.HostUsers) { + if slices.Contains(c.config.HostUsers, c.config.User) { execUser, err = lookupHostUser(c.config.User) } @@ -2776,38 +2784,6 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) { return passwdPath, groupPath, nil } -func (c *Container) copyTimezoneFile(zonePath string) (string, error) { - localtimeCopy := filepath.Join(c.state.RunDir, "localtime") - file, err := os.Stat(zonePath) - if err != nil { - return "", err - } - if file.IsDir() { - return "", errors.New("invalid timezone: is a directory") - } - src, err := os.Open(zonePath) - if err != nil { - return "", err - } - defer src.Close() - dest, err := os.Create(localtimeCopy) - if err != nil { - return "", err - } - defer dest.Close() - _, err = io.Copy(dest, src) - if err != nil { - return "", err - } - if err := c.relabel(localtimeCopy, c.config.MountLabel, false); err != nil { - return "", err - } - if err := dest.Chown(c.RootUID(), c.RootGID()); err != nil { - return "", err - } - return localtimeCopy, err -} - func (c *Container) cleanupOverlayMounts() error { return overlay.CleanupContent(c.config.StaticDir) } @@ -2820,10 +2796,10 @@ func (c *Container) createSecretMountDir(runPath string) error { if err := umask.MkdirAllIgnoreUmask(src, os.FileMode(0o755)); err != nil { return err } - if err := label.Relabel(src, c.config.MountLabel, false); err != nil { + if err := c.relabel(src, c.config.MountLabel, false); err != nil { return err } - 
if err := os.Chown(src, c.RootUID(), c.RootGID()); err != nil { + if err := idtools.SafeChown(src, c.RootUID(), c.RootGID()); err != nil { return err } c.state.BindMounts[filepath.Join(runPath, "secrets")] = src @@ -2882,7 +2858,7 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error { return err } - if err := os.Lchown(mountPoint, uid, gid); err != nil { + if err := idtools.SafeLchown(mountPoint, uid, gid); err != nil { return err } @@ -2891,7 +2867,22 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error { st, err := os.Lstat(filepath.Join(c.state.Mountpoint, v.Dest)) if err == nil { if stat, ok := st.Sys().(*syscall.Stat_t); ok { - if err := os.Lchown(mountPoint, int(stat.Uid), int(stat.Gid)); err != nil { + uid, gid := int(stat.Uid), int(stat.Gid) + + if c.config.IDMappings.UIDMap != nil { + p := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap) + newUID, newGID, err := mappings.ToContainer(p) + if err != nil { + return fmt.Errorf("mapping user %d:%d: %w", uid, gid, err) + } + uid, gid = newUID, newGID + } + + if err := idtools.SafeLchown(mountPoint, uid, gid); err != nil { return err } } @@ -2923,7 +2914,12 @@ func (c *Container) relabel(src, mountLabel string, shared bool) error { return nil } } - return label.Relabel(src, mountLabel, shared) + err := label.Relabel(src, mountLabel, shared) + if errors.Is(err, unix.ENOTSUP) { + logrus.Debugf("Labeling not supported on %q", src) + return nil + } + return err } func (c *Container) ChangeHostPathOwnership(src string, recurse bool, uid, gid int) error { diff --git a/vendor/github.com/containers/podman/v4/libpod/container_internal_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/container_internal_freebsd.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/container_internal_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/container_internal_freebsd.go index 152de9295..0f8bca0a3 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_internal_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_internal_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -13,7 +12,7 @@ import ( "time" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/pkg/rootless" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/sirupsen/logrus" @@ -194,15 +193,18 @@ func openDirectory(path string) (fd int, err error) { func (c *Container) addNetworkNamespace(g *generate.Generator) error { if c.config.CreateNetNS { - if c.state.NetNS == "" { - // This should not happen since network setup - // errors should be propagated correctly from - // (*Runtime).createNetNS. Check for it anyway - // since it caused nil pointer dereferences in - // the past (see #16333). - return fmt.Errorf("Inconsistent state: c.config.CreateNetNS is set but c.state.NetNS is nil") + // If PostConfigureNetNS is set (which is true on FreeBSD 13.3 + // and later), we can manage a container's network settings + // without an extra parent jail to own the vnet. + // + // In this case, the OCI runtime creates a new vnet for the + // container jail, otherwise it creates the container jail as a + // child of the jail owning the vnet.
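The FreeBSD comment above describes a two-way annotation decision; a reduced sketch of that branch, using a plain map in place of the runtime-tools generator the real code drives, and an invented jail name:

```go
package main

import "fmt"

// networkAnnotations mirrors the branch above: with PostConfigureNetNS the
// OCI runtime creates a fresh vnet for the container jail; without it the
// container jail is parented to the jail that owns the vnet.
func networkAnnotations(postConfigureNetNS bool, netNS string) map[string]string {
	annotations := map[string]string{}
	if postConfigureNetNS {
		annotations["org.freebsd.jail.vnet"] = "new"
	} else {
		annotations["org.freebsd.parentJail"] = netNS
	}
	return annotations
}

func main() {
	fmt.Println(networkAnnotations(true, ""))
	fmt.Println(networkAnnotations(false, "example-vnet-jail"))
}
```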
+ if c.config.PostConfigureNetNS { + g.AddAnnotation("org.freebsd.jail.vnet", "new") + } else { + g.AddAnnotation("org.freebsd.parentJail", c.state.NetNS) } - g.AddAnnotation("org.freebsd.parentJail", c.state.NetNS) } return nil } @@ -252,7 +254,7 @@ func (c *Container) addSharedNamespaces(g *generate.Generator) error { } needEnv := true for _, checkEnv := range g.Config.Process.Env { - if strings.SplitN(checkEnv, "=", 2)[0] == "HOSTNAME" { + if strings.HasPrefix(checkEnv, "HOSTNAME=") { needEnv = false break } diff --git a/vendor/github.com/containers/podman/v4/libpod/container_internal_linux.go b/vendor/github.com/containers/podman/v5/libpod/container_internal_linux.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/container_internal_linux.go rename to vendor/github.com/containers/podman/v5/libpod/container_internal_linux.go index 7f6508d38..9e279116c 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_internal_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_internal_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -19,9 +18,8 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/utils" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/selinux/go-selinux/label" @@ -177,7 +175,6 @@ func (c *Container) cleanupNetwork() error { c.state.NetNS = "" c.state.NetworkStatus = nil - c.state.NetworkStatusOld = nil if c.valid { return c.save() @@ -390,7 +387,7 @@ func (c *Container) getOCICgroupPath() (string, error) { case c.config.NoCgroups: return "", nil case c.config.CgroupsMode == cgroupSplit: - selfCgroup, err := utils.GetOwnCgroupDisallowRoot() + selfCgroup, err := cgroups.GetOwnCgroupDisallowRoot() if err != nil { return "", err } diff --git a/vendor/github.com/containers/podman/v4/libpod/container_linux.go b/vendor/github.com/containers/podman/v5/libpod/container_linux.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/container_linux.go rename to vendor/github.com/containers/podman/v5/libpod/container_linux.go index 8ff47f461..1101aaa8e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/container_log.go b/vendor/github.com/containers/podman/v5/libpod/container_log.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/container_log.go rename to vendor/github.com/containers/podman/v5/libpod/container_log.go index 55043456f..ea76c7ea3 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_log.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_log.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -10,10 +9,10 @@ import ( "os" "time" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/libpod/logs" - systemdDefine "github.com/containers/podman/v4/pkg/systemd/define" + 
"github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/libpod/logs" + systemdDefine "github.com/containers/podman/v5/pkg/systemd/define" "github.com/nxadm/tail" "github.com/nxadm/tail/watch" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/podman/v4/libpod/container_log_linux.go b/vendor/github.com/containers/podman/v5/libpod/container_log_linux.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/container_log_linux.go rename to vendor/github.com/containers/podman/v5/libpod/container_log_linux.go index c3564fbf2..f874faa58 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_log_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_log_linux.go @@ -1,5 +1,4 @@ //go:build !remote && linux && systemd -// +build !remote,linux,systemd package libpod @@ -11,10 +10,10 @@ import ( "strings" "time" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/libpod/logs" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/libpod/logs" + "github.com/containers/podman/v5/pkg/rootless" "github.com/coreos/go-systemd/v22/sdjournal" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/container_log_unsupported.go b/vendor/github.com/containers/podman/v5/libpod/container_log_unsupported.go similarity index 69% rename from vendor/github.com/containers/podman/v4/libpod/container_log_unsupported.go rename to vendor/github.com/containers/podman/v5/libpod/container_log_unsupported.go index 6f89f4612..ebda51ff6 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_log_unsupported.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_log_unsupported.go @@ -1,6 +1,4 @@ //go:build !remote && (!linux || !systemd) -// +build !remote -// +build !linux !systemd package libpod @@ -8,8 +6,8 @@ import ( "context" "fmt" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/logs" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/logs" ) func (c *Container) readFromJournal(_ context.Context, _ *logs.LogOptions, _ chan *logs.LogLine, _ int64, _ string) error { diff --git a/vendor/github.com/containers/podman/v4/libpod/container_path_resolution.go b/vendor/github.com/containers/podman/v5/libpod/container_path_resolution.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/container_path_resolution.go rename to vendor/github.com/containers/podman/v5/libpod/container_path_resolution.go index 414837997..682aa9328 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_path_resolution.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_path_resolution.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -8,7 +7,7 @@ import ( "path/filepath" "strings" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" securejoin "github.com/cyphar/filepath-securejoin" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/podman/v4/libpod/container_stat_common.go b/vendor/github.com/containers/podman/v5/libpod/container_stat_common.go similarity index 97% 
rename from vendor/github.com/containers/podman/v4/libpod/container_stat_common.go rename to vendor/github.com/containers/podman/v5/libpod/container_stat_common.go index b2e7eacd7..9ad7acb4e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_stat_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_stat_common.go @@ -1,6 +1,4 @@ //go:build !remote && (linux || freebsd) -// +build !remote -// +build linux freebsd package libpod @@ -12,8 +10,8 @@ import ( "strings" "github.com/containers/buildah/copier" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/copy" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/copy" ) // statOnHost stats the specified path *on the host*. It returns the file info diff --git a/vendor/github.com/containers/podman/v4/libpod/container_stat_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/container_stat_freebsd.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/container_stat_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/container_stat_freebsd.go index 24288d798..16464689e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_stat_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_stat_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/container_stat_linux.go b/vendor/github.com/containers/podman/v5/libpod/container_stat_linux.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/container_stat_linux.go rename to vendor/github.com/containers/podman/v5/libpod/container_stat_linux.go index ef0e88c32..5a67c0ea3 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_stat_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_stat_linux.go @@ -1,11 +1,10 @@ //go:build !remote -// +build !remote package libpod import ( "github.com/containers/buildah/copier" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" ) // statInsideMount stats the specified path *inside* the container's mount and PID diff --git a/vendor/github.com/containers/podman/v4/libpod/container_top_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/container_top_freebsd.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/container_top_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/container_top_freebsd.go index 801814bb6..21be8250d 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_top_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_top_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -11,8 +10,8 @@ import ( "strings" "sync" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/util" "github.com/google/shlex" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/container_top_linux.c b/vendor/github.com/containers/podman/v5/libpod/container_top_linux.c similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/container_top_linux.c rename to vendor/github.com/containers/podman/v5/libpod/container_top_linux.c index 735becc64..a7192c54c 100644 --- 
a/vendor/github.com/containers/podman/v4/libpod/container_top_linux.c +++ b/vendor/github.com/containers/podman/v5/libpod/container_top_linux.c @@ -1,5 +1,5 @@ //go:build !remote -// +build !remote + #define _GNU_SOURCE #include diff --git a/vendor/github.com/containers/podman/v4/libpod/container_top_linux.go b/vendor/github.com/containers/podman/v5/libpod/container_top_linux.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/container_top_linux.go rename to vendor/github.com/containers/podman/v5/libpod/container_top_linux.go index 85cec315e..c1d78cd4e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_top_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_top_linux.go @@ -1,5 +1,4 @@ //go:build !remote && linux && cgo -// +build !remote,linux,cgo package libpod @@ -17,13 +16,13 @@ import ( "syscall" "unsafe" - "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/psgo" "github.com/containers/storage/pkg/reexec" "github.com/google/shlex" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) @@ -232,7 +231,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) { // Only use ps(1) from the host when we know the container was not started with CAP_SYS_PTRACE, // with it the container can access /proc/$pid/ files and potentially escape the container fs. if c.config.Spec.Process.Capabilities != nil && - !util.StringInSlice("CAP_SYS_PTRACE", c.config.Spec.Process.Capabilities.Effective) { + !slices.Contains(c.config.Spec.Process.Capabilities.Effective, "CAP_SYS_PTRACE") { var retry bool output, retry, err = c.execPS(psDescriptors) if err != nil { @@ -328,7 +327,7 @@ func (c *Container) execPS(psArgs []string) ([]string, bool, error) { cmd.Stdout = wPipe cmd.Stderr = &errBuf // nil means use current env so explicitly unset all, to not leak any sensitive env vars - cmd.Env = []string{} + cmd.Env = []string{fmt.Sprintf("HOME=%s", os.Getenv("HOME"))} retryContainerExec := true err = cmd.Run() diff --git a/vendor/github.com/containers/podman/v4/libpod/container_top_unsupported.go b/vendor/github.com/containers/podman/v5/libpod/container_top_unsupported.go similarity index 84% rename from vendor/github.com/containers/podman/v4/libpod/container_top_unsupported.go rename to vendor/github.com/containers/podman/v5/libpod/container_top_unsupported.go index 66fd66887..1c47b69d5 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_top_unsupported.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_top_unsupported.go @@ -1,7 +1,4 @@ //go:build !remote && !(linux && cgo) && !freebsd -// +build !remote -// +build !linux !cgo -// +build !freebsd package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/container_validate.go b/vendor/github.com/containers/podman/v5/libpod/container_validate.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/container_validate.go rename to vendor/github.com/containers/podman/v5/libpod/container_validate.go index e01d8ef09..0c2b604dd 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_validate.go +++ b/vendor/github.com/containers/podman/v5/libpod/container_validate.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -9,7 +8,7 @@ import ( 
"github.com/containers/image/v5/docker" "github.com/containers/image/v5/pkg/shortnames" "github.com/containers/image/v5/transports/alltransports" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" spec "github.com/opencontainers/runtime-spec/specs-go" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/define/annotations.go b/vendor/github.com/containers/podman/v5/libpod/define/annotations.go similarity index 88% rename from vendor/github.com/containers/podman/v4/libpod/define/annotations.go rename to vendor/github.com/containers/podman/v5/libpod/define/annotations.go index 29796533b..a9d4031ae 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/annotations.go +++ b/vendor/github.com/containers/podman/v5/libpod/define/annotations.go @@ -18,13 +18,6 @@ const ( // the two supported boolean values (InspectResponseTrue and // InspectResponseFalse) it will be used in the output of Inspect(). InspectAnnotationAutoremove = "io.podman.annotations.autoremove" - // InspectAnnotationVolumesFrom is used by Inspect to identify - // containers whose volumes are being used by this container. - // It is expected to be a comma-separated list of container names and/or - // IDs. - // If an annotation with this key is found in the OCI spec, it will be - // used in the output of Inspect(). - InspectAnnotationVolumesFrom = "io.podman.annotations.volumes-from" // InspectAnnotationPrivileged is used by Inspect to identify containers // which are privileged (IE, running with elevated privileges). // It is expected to be a boolean, populated by one of @@ -149,19 +142,33 @@ const ( // pod creation InfraNameAnnotation = "io.podman.annotations.infra.name" + // UserNsAnnotation is used by play kube when playing a kube yaml to specify userns + // of the container + UserNsAnnotation = "io.podman.annotations.userns" + // UlimitAnnotation is used by kube play when playing a kube yaml to specify the ulimits // of the container UlimitAnnotation = "io.podman.annotations.ulimit" - // MaxKubeAnnotation is the max length of annotations allowed by Kubernetes. - MaxKubeAnnotation = 63 + // VolumesFromAnnotation is used by by play kube when playing a kube + // yaml to specify volumes-from of the container + // It is expected to be a semicolon-separated list of container names and/or + // IDs optionally with colon separated mount options. + VolumesFromAnnotation = "io.podman.annotations.volumes-from" + + // KubeHealthCheckAnnotation is used by kube play to tell podman that any health checks should follow + // the k8s behavior of waiting for the intialDelaySeconds to be over before updating the status + KubeHealthCheckAnnotation = "io.podman.annotations.kube.health.check" + + // TotalAnnotationSizeLimitB is the max length of annotations allowed by Kubernetes. + TotalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB ) // IsReservedAnnotation returns true if the specified value corresponds to an // already reserved annotation that Podman sets during container creation. 
func IsReservedAnnotation(value string) bool { switch value { - case InspectAnnotationCIDFile, InspectAnnotationAutoremove, InspectAnnotationVolumesFrom, InspectAnnotationPrivileged, InspectAnnotationPublishAll, InspectAnnotationInit, InspectAnnotationLabel, InspectAnnotationSeccomp, InspectAnnotationApparmor, InspectResponseTrue, InspectResponseFalse: + case InspectAnnotationCIDFile, InspectAnnotationAutoremove, InspectAnnotationPrivileged, InspectAnnotationPublishAll, InspectAnnotationInit, InspectAnnotationLabel, InspectAnnotationSeccomp, InspectAnnotationApparmor, InspectResponseTrue, InspectResponseFalse, VolumesFromAnnotation: return true default: diff --git a/vendor/github.com/containers/podman/v4/libpod/define/autoupdate.go b/vendor/github.com/containers/podman/v5/libpod/define/autoupdate.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/autoupdate.go rename to vendor/github.com/containers/podman/v5/libpod/define/autoupdate.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/checkpoint_restore.go b/vendor/github.com/containers/podman/v5/libpod/define/checkpoint_restore.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/checkpoint_restore.go rename to vendor/github.com/containers/podman/v5/libpod/define/checkpoint_restore.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/config.go b/vendor/github.com/containers/podman/v5/libpod/define/config.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/define/config.go rename to vendor/github.com/containers/podman/v5/libpod/define/config.go index e5729d47e..cf9fd3812 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/config.go +++ b/vendor/github.com/containers/podman/v5/libpod/define/config.go @@ -82,6 +82,9 @@ const NoLogging = "none" // PassthroughLogging is the string conmon expects when specifying to use the passthrough driver const PassthroughLogging = "passthrough" +// PassthroughTTYLogging is the string conmon expects when specifying to use the passthrough driver even on a tty. 
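`TotalAnnotationSizeLimitB` above replaces the per-value 63-character cap (`MaxKubeAnnotation`) with the Kubernetes rule that all annotation keys and values share one 256 kB budget. A sketch of the validation this enables, assuming the combined-size convention Kubernetes applies in its own annotation validation:

```go
package main

import "fmt"

const totalAnnotationSizeLimitB = 256 * (1 << 10) // 256 kB

// validateAnnotations applies the Kubernetes-style total-size rule: keys and
// values all count toward a single shared budget.
func validateAnnotations(annotations map[string]string) error {
	var total int
	for k, v := range annotations {
		total += len(k) + len(v)
	}
	if total > totalAnnotationSizeLimitB {
		return fmt.Errorf("annotations size %d exceeds limit %d", total, totalAnnotationSizeLimitB)
	}
	return nil
}

func main() {
	err := validateAnnotations(map[string]string{
		"io.podman.annotations.ulimit": "nofile=1024:2048",
	})
	fmt.Println(err) // <nil>
}
```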
+const PassthroughTTYLogging = "passthrough-tty" + // DefaultRlimitValue is the value set by default for nofile and nproc const RLimitDefaultValue = uint64(1048576) diff --git a/vendor/github.com/containers/podman/v4/libpod/define/container.go b/vendor/github.com/containers/podman/v5/libpod/define/container.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/container.go rename to vendor/github.com/containers/podman/v5/libpod/define/container.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go b/vendor/github.com/containers/podman/v5/libpod/define/container_inspect.go similarity index 92% rename from vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go rename to vendor/github.com/containers/podman/v5/libpod/define/container_inspect.go index 879dd1395..1d3692795 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go +++ b/vendor/github.com/containers/podman/v5/libpod/define/container_inspect.go @@ -1,9 +1,13 @@ package define import ( + "encoding/json" + "fmt" + "strings" "time" "github.com/containers/image/v5/manifest" + "github.com/containers/podman/v5/pkg/signal" ) type InspectIDMappings struct { @@ -44,7 +48,7 @@ type InspectContainerConfig struct { // Container working directory WorkingDir string `json:"WorkingDir"` // Container entrypoint - Entrypoint string `json:"Entrypoint"` + Entrypoint []string `json:"Entrypoint"` // On-build arguments - presently unused. More of Buildah's domain. OnBuild *string `json:"OnBuild"` // Container labels @@ -52,7 +56,7 @@ type InspectContainerConfig struct { // Container annotations Annotations map[string]string `json:"Annotations"` // Container stop signal - StopSignal uint `json:"StopSignal"` + StopSignal string `json:"StopSignal"` // Configured healthcheck for the container Healthcheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"` // HealthcheckOnFailureAction defines an action to take once the container turns unhealthy. @@ -87,6 +91,51 @@ type InspectContainerConfig struct { SdNotifySocket string `json:"sdNotifySocket,omitempty"` } +// UnmarshalJSON allows compatibility with podman V4 API +func (insp *InspectContainerConfig) UnmarshalJSON(data []byte) error { + type Alias InspectContainerConfig + aux := &struct { + Entrypoint interface{} `json:"Entrypoint"` + StopSignal interface{} `json:"StopSignal"` + *Alias + }{ + Alias: (*Alias)(insp), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + switch entrypoint := aux.Entrypoint.(type) { + case string: + insp.Entrypoint = strings.Split(entrypoint, " ") + case []string: + insp.Entrypoint = entrypoint + case []interface{}: + insp.Entrypoint = []string{} + for _, entry := range entrypoint { + if str, ok := entry.(string); ok { + insp.Entrypoint = append(insp.Entrypoint, str) + } + } + case nil: + insp.Entrypoint = []string{} + default: + return fmt.Errorf("cannot unmarshal Config.Entrypoint of type %T", entrypoint) + } + + switch stopsignal := aux.StopSignal.(type) { + case string: + insp.StopSignal = stopsignal + case float64: + insp.StopSignal = signal.ToDockerFormat(uint(stopsignal)) + case nil: + break + default: + return fmt.Errorf("cannot unmarshal Config.StopSignal of type %T", stopsignal) + } + return nil +} + // InspectRestartPolicy holds information about the container's restart policy. type InspectRestartPolicy struct { // Name contains the container's restart policy.
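The compatibility shim above has to cope with two wire formats: podman v4 emitted `Entrypoint` as one space-joined string and `StopSignal` as a number, while v5 emits a string slice and a signal name. A quick standalone demonstration of the two payload shapes such a client-side unmarshaller must accept, decoding into `interface{}` here only to show what arrives on the wire:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type config struct {
	Entrypoint interface{} `json:"Entrypoint"`
	StopSignal interface{} `json:"StopSignal"`
}

func main() {
	payloads := []string{
		`{"Entrypoint":"/bin/sh -c top","StopSignal":15}`,              // v4 shape
		`{"Entrypoint":["/bin/sh","-c","top"],"StopSignal":"SIGTERM"}`, // v5 shape
	}
	for _, payload := range payloads {
		var c config
		if err := json.Unmarshal([]byte(payload), &c); err != nil {
			panic(err)
		}
		// JSON numbers arrive as float64, which is why the vendored code
		// switches on that type for the v4 StopSignal.
		fmt.Printf("entrypoint=%v (%T), stopsignal=%v (%T)\n",
			c.Entrypoint, c.Entrypoint, c.StopSignal, c.StopSignal)
	}
}
```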
@@ -206,34 +255,34 @@ type InspectMount struct { // Docker, but here we see more fields that are unused (nonsensical in the // context of Libpod). type InspectContainerState struct { - OciVersion string `json:"OciVersion"` - Status string `json:"Status"` - Running bool `json:"Running"` - Paused bool `json:"Paused"` - Restarting bool `json:"Restarting"` // TODO - OOMKilled bool `json:"OOMKilled"` - Dead bool `json:"Dead"` - Pid int `json:"Pid"` - ConmonPid int `json:"ConmonPid,omitempty"` - ExitCode int32 `json:"ExitCode"` - Error string `json:"Error"` // TODO - StartedAt time.Time `json:"StartedAt"` - FinishedAt time.Time `json:"FinishedAt"` - Health HealthCheckResults `json:"Health,omitempty"` - Checkpointed bool `json:"Checkpointed,omitempty"` - CgroupPath string `json:"CgroupPath,omitempty"` - CheckpointedAt time.Time `json:"CheckpointedAt,omitempty"` - RestoredAt time.Time `json:"RestoredAt,omitempty"` - CheckpointLog string `json:"CheckpointLog,omitempty"` - CheckpointPath string `json:"CheckpointPath,omitempty"` - RestoreLog string `json:"RestoreLog,omitempty"` - Restored bool `json:"Restored,omitempty"` - StoppedByUser bool `json:"StoppedByUser,omitempty"` + OciVersion string `json:"OciVersion"` + Status string `json:"Status"` + Running bool `json:"Running"` + Paused bool `json:"Paused"` + Restarting bool `json:"Restarting"` // TODO + OOMKilled bool `json:"OOMKilled"` + Dead bool `json:"Dead"` + Pid int `json:"Pid"` + ConmonPid int `json:"ConmonPid,omitempty"` + ExitCode int32 `json:"ExitCode"` + Error string `json:"Error"` // TODO + StartedAt time.Time `json:"StartedAt"` + FinishedAt time.Time `json:"FinishedAt"` + Health *HealthCheckResults `json:"Health,omitempty"` + Checkpointed bool `json:"Checkpointed,omitempty"` + CgroupPath string `json:"CgroupPath,omitempty"` + CheckpointedAt time.Time `json:"CheckpointedAt,omitempty"` + RestoredAt time.Time `json:"RestoredAt,omitempty"` + CheckpointLog string `json:"CheckpointLog,omitempty"` + CheckpointPath string `json:"CheckpointPath,omitempty"` + RestoreLog string `json:"RestoreLog,omitempty"` + Restored bool `json:"Restored,omitempty"` + StoppedByUser bool `json:"StoppedByUser,omitempty"` } // Healthcheck returns the HealthCheckResults. This is used for old podman compat // to make the "Healthcheck" key available in the go template. -func (s *InspectContainerState) Healthcheck() HealthCheckResults { +func (s *InspectContainerState) Healthcheck() *HealthCheckResults { return s.Health } diff --git a/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go b/vendor/github.com/containers/podman/v5/libpod/define/containerstate.go similarity index 91% rename from vendor/github.com/containers/podman/v4/libpod/define/containerstate.go rename to vendor/github.com/containers/podman/v5/libpod/define/containerstate.go index 4520dc41d..e96798886 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go +++ b/vendor/github.com/containers/podman/v5/libpod/define/containerstate.go @@ -141,11 +141,23 @@ type ContainerStats struct { MemUsage uint64 MemLimit uint64 MemPerc float64 - NetInput uint64 - NetOutput uint64 - BlockInput uint64 - BlockOutput uint64 - PIDs uint64 - UpTime time.Duration - Duration uint64 + // Map of interface name to network statistics for that interface. 
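This `ContainerStats` change is directly relevant to the exporter: the aggregate `NetInput`/`NetOutput` counters are gone, and a consumer that still wants container-wide totals must sum the new per-interface map. A sketch with simplified stand-in types and invented numbers:

```go
package main

import "fmt"

// containerNetworkStats is a reduced stand-in for the per-interface
// statistics struct introduced above.
type containerNetworkStats struct {
	RxBytes, TxBytes uint64
}

func main() {
	network := map[string]containerNetworkStats{
		"eth0": {RxBytes: 4096, TxBytes: 1024},
		"eth1": {RxBytes: 512, TxBytes: 256},
	}
	// Recover the old aggregate counters by summing across interfaces.
	var netInput, netOutput uint64
	for _, s := range network {
		netInput += s.RxBytes
		netOutput += s.TxBytes
	}
	fmt.Printf("NetInput=%d NetOutput=%d\n", netInput, netOutput)
}
```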
+ Network map[string]ContainerNetworkStats + BlockInput uint64 + BlockOutput uint64 + PIDs uint64 + UpTime time.Duration + Duration uint64 +} + +// Statistics for an individual container network interface +type ContainerNetworkStats struct { + RxBytes uint64 + RxDropped uint64 + RxErrors uint64 + RxPackets uint64 + TxBytes uint64 + TxDropped uint64 + TxErrors uint64 + TxPackets uint64 } diff --git a/vendor/github.com/containers/podman/v4/libpod/define/diff.go b/vendor/github.com/containers/podman/v5/libpod/define/diff.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/diff.go rename to vendor/github.com/containers/podman/v5/libpod/define/diff.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/errors.go b/vendor/github.com/containers/podman/v5/libpod/define/errors.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/errors.go rename to vendor/github.com/containers/podman/v5/libpod/define/errors.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go b/vendor/github.com/containers/podman/v5/libpod/define/exec_codes.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/exec_codes.go rename to vendor/github.com/containers/podman/v5/libpod/define/exec_codes.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/exit_code_propagation.go b/vendor/github.com/containers/podman/v5/libpod/define/exit_code_propagation.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/exit_code_propagation.go rename to vendor/github.com/containers/podman/v5/libpod/define/exit_code_propagation.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/fileinfo.go b/vendor/github.com/containers/podman/v5/libpod/define/fileinfo.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/fileinfo.go rename to vendor/github.com/containers/podman/v5/libpod/define/fileinfo.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go b/vendor/github.com/containers/podman/v5/libpod/define/healthchecks.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/healthchecks.go rename to vendor/github.com/containers/podman/v5/libpod/define/healthchecks.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/info.go b/vendor/github.com/containers/podman/v5/libpod/define/info.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/info.go rename to vendor/github.com/containers/podman/v5/libpod/define/info.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/mount.go b/vendor/github.com/containers/podman/v5/libpod/define/mount.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/mount.go rename to vendor/github.com/containers/podman/v5/libpod/define/mount.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/mount_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/define/mount_freebsd.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/mount_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/define/mount_freebsd.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/mount_linux.go b/vendor/github.com/containers/podman/v5/libpod/define/mount_linux.go similarity index 100% rename from 
vendor/github.com/containers/podman/v4/libpod/define/mount_linux.go rename to vendor/github.com/containers/podman/v5/libpod/define/mount_linux.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/mount_unsupported.go b/vendor/github.com/containers/podman/v5/libpod/define/mount_unsupported.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/mount_unsupported.go rename to vendor/github.com/containers/podman/v5/libpod/define/mount_unsupported.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go b/vendor/github.com/containers/podman/v5/libpod/define/pod_inspect.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/pod_inspect.go rename to vendor/github.com/containers/podman/v5/libpod/define/pod_inspect.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/podstate.go b/vendor/github.com/containers/podman/v5/libpod/define/podstate.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/podstate.go rename to vendor/github.com/containers/podman/v5/libpod/define/podstate.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/runtime.go b/vendor/github.com/containers/podman/v5/libpod/define/runtime.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/runtime.go rename to vendor/github.com/containers/podman/v5/libpod/define/runtime.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/sdnotify.go b/vendor/github.com/containers/podman/v5/libpod/define/sdnotify.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/sdnotify.go rename to vendor/github.com/containers/podman/v5/libpod/define/sdnotify.go diff --git a/vendor/github.com/containers/podman/v4/libpod/define/version.go b/vendor/github.com/containers/podman/v5/libpod/define/version.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/define/version.go rename to vendor/github.com/containers/podman/v5/libpod/define/version.go index 2c17e6e92..13a8fdb77 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/version.go +++ b/vendor/github.com/containers/podman/v5/libpod/define/version.go @@ -5,7 +5,7 @@ import ( "strconv" "time" - "github.com/containers/podman/v4/version" + "github.com/containers/podman/v5/version" ) // Overwritten at build time diff --git a/vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go b/vendor/github.com/containers/podman/v5/libpod/define/volume_inspect.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/define/volume_inspect.go rename to vendor/github.com/containers/podman/v5/libpod/define/volume_inspect.go diff --git a/vendor/github.com/containers/podman/v4/libpod/diff.go b/vendor/github.com/containers/podman/v5/libpod/diff.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/diff.go rename to vendor/github.com/containers/podman/v5/libpod/diff.go index 906329d78..b32881fac 100644 --- a/vendor/github.com/containers/podman/v4/libpod/diff.go +++ b/vendor/github.com/containers/podman/v5/libpod/diff.go @@ -1,13 +1,12 @@ //go:build !remote -// +build !remote package libpod import ( "fmt" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/layers" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/layers" "github.com/containers/storage/pkg/archive" ) diff --git 
a/vendor/github.com/containers/podman/v4/libpod/doc.go b/vendor/github.com/containers/podman/v5/libpod/doc.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/doc.go rename to vendor/github.com/containers/podman/v5/libpod/doc.go diff --git a/vendor/github.com/containers/podman/v4/libpod/driver/driver.go b/vendor/github.com/containers/podman/v5/libpod/driver/driver.go similarity index 92% rename from vendor/github.com/containers/podman/v4/libpod/driver/driver.go rename to vendor/github.com/containers/podman/v5/libpod/driver/driver.go index 8694685ff..789089a23 100644 --- a/vendor/github.com/containers/podman/v4/libpod/driver/driver.go +++ b/vendor/github.com/containers/podman/v5/libpod/driver/driver.go @@ -1,7 +1,7 @@ package driver import ( - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/containers/storage" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/events.go b/vendor/github.com/containers/podman/v5/libpod/events.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/events.go rename to vendor/github.com/containers/podman/v5/libpod/events.go index 6e8c409e3..1ca42ffd7 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events.go +++ b/vendor/github.com/containers/podman/v5/libpod/events.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -9,7 +8,7 @@ import ( "path/filepath" "sync" - "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v5/libpod/events" "github.com/sirupsen/logrus" ) @@ -44,7 +43,6 @@ func (c *Container) newContainerEventWithInspectData(status events.Status, inspe e.Type = events.Container e.Details = events.Details{ - ID: e.ID, PodID: c.PodID(), Attributes: c.Labels(), } @@ -77,6 +75,14 @@ func (c *Container) newContainerEventWithInspectData(status events.Status, inspe e.HealthStatus = containerHealthStatus } + if status == events.Remove { + exitCode, err := c.runtime.state.GetContainerExitCode(c.ID()) + if err == nil { + intExitCode := int(exitCode) + e.ContainerExitCode = &intExitCode + } + } + return c.runtime.eventer.Write(e) } @@ -88,10 +94,10 @@ func (c *Container) newContainerExitedEvent(exitCode int32) { e.Image = c.config.RootfsImageName e.Type = events.Container e.PodID = c.PodID() - e.ContainerExitCode = int(exitCode) + intExitCode := int(exitCode) + e.ContainerExitCode = &intExitCode e.Details = events.Details{ - ID: e.ID, Attributes: c.Labels(), } @@ -107,12 +113,12 @@ func (c *Container) newExecDiedEvent(sessionID string, exitCode int) { e.Name = c.Name() e.Image = c.config.RootfsImageName e.Type = events.Container - e.ContainerExitCode = exitCode + intExitCode := exitCode + e.ContainerExitCode = &intExitCode e.Attributes = make(map[string]string) e.Attributes["execID"] = sessionID e.Details = events.Details{ - ID: e.ID, Attributes: c.Labels(), } diff --git a/vendor/github.com/containers/podman/v4/libpod/events/config.go b/vendor/github.com/containers/podman/v5/libpod/events/config.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/events/config.go rename to vendor/github.com/containers/podman/v5/libpod/events/config.go index 309a49574..0be17acc0 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/config.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/config.go @@ -24,7 +24,7 @@ const ( type Event struct { // ContainerExitCode is for storing the exit code of a container which can // be used for "internal" 
event notification - ContainerExitCode int `json:",omitempty"` + ContainerExitCode *int `json:",omitempty"` // ID can be for the container, image, volume, etc ID string `json:",omitempty"` // Image used where applicable @@ -41,6 +41,8 @@ type Event struct { Type Type // Health status of the current container HealthStatus string `json:"health_status,omitempty"` + // Error code for certain events involving errors. + Error string `json:"error,omitempty"` Details } @@ -48,8 +50,6 @@ type Event struct { // Details describes specifics about certain events, specifically around // container events type Details struct { - // ID is the event ID - ID string // ContainerInspectData includes the payload of the container's inspect // data. Only set when events_container_create_inspect_data is set true // in containers.conf. @@ -172,6 +172,8 @@ const ( Prune Status = "prune" // Pull ... Pull Status = "pull" + // PullError is an error pulling an image + PullError Status = "pull-error" // Push ... Push Status = "push" // Refresh indicates that the system refreshed the state after a diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events.go b/vendor/github.com/containers/podman/v5/libpod/events/events.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/events/events.go rename to vendor/github.com/containers/podman/v5/libpod/events/events.go index 2105a3b89..80d8f0c08 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/events.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/events.go @@ -54,13 +54,6 @@ func NewEvent(status Status) Event { } } -// Recycle checks if the event log has reach a limit and if so -// renames the current log and starts a new one. The remove bool -// indicates the old log file should be deleted. 
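
A note on the Event changes just above: ContainerExitCode moves from int to *int, so a zero exit status is now distinguishable from "no exit code recorded", and consumers must nil-check before dereferencing (as the journald backend later in this diff does). A minimal sketch of how a caller might adapt; the helper name is illustrative, not part of the diff:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/containers/podman/v5/libpod/events"
)

// exitCodeField is a hypothetical helper: it renders the exit code the way
// the journald writer in this diff does, reporting "absent" when the event
// carries no exit code (previously conflated with an exit code of 0).
func exitCodeField(e *events.Event) (string, bool) {
	if e.ContainerExitCode == nil {
		return "", false
	}
	return strconv.Itoa(*e.ContainerExitCode), true
}

func main() {
	e := events.NewEvent(events.Exited)
	code := 0
	e.ContainerExitCode = &code // a genuine exit code of 0, not "unset"
	if v, ok := exitCodeField(&e); ok {
		fmt.Println("PODMAN_EXIT_CODE =", v)
	}
}
```
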
-func (e *Event) Recycle(path string, remove bool) error { - return errors.New("not implemented") -} - // ToJSONString returns the event as a json'ified string func (e *Event) ToJSONString() (string, error) { b, err := json.Marshal(e) @@ -69,6 +62,9 @@ func (e *Event) ToJSONString() (string, error) { // ToHumanReadable returns human-readable event as a formatted string func (e *Event) ToHumanReadable(truncate bool) string { + if e == nil { + return "" + } var humanFormat string id := e.ID if truncate { @@ -94,6 +90,9 @@ func (e *Event) ToHumanReadable(truncate bool) string { humanFormat = fmt.Sprintf("%s %s %s %s (container=%s, name=%s)", e.Time, e.Type, e.Status, id, id, e.Network) case Image: humanFormat = fmt.Sprintf("%s %s %s %s %s", e.Time, e.Type, e.Status, id, e.Name) + if e.Error != "" { + humanFormat += " " + e.Error + } case System: if e.Name != "" { humanFormat = fmt.Sprintf("%s %s %s %s", e.Time, e.Type, e.Status, e.Name) @@ -198,6 +197,8 @@ func StringToStatus(name string) (Status, error) { return Prune, nil case Pull.String(): return Pull, nil + case PullError.String(): + return PullError, nil case Push.String(): return Push, nil case Refresh.String(): diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/events/events_freebsd.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/events/events_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/events/events_freebsd.go diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events_linux.go b/vendor/github.com/containers/podman/v5/libpod/events/events_linux.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/events/events_linux.go rename to vendor/github.com/containers/podman/v5/libpod/events/events_linux.go diff --git a/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go b/vendor/github.com/containers/podman/v5/libpod/events/events_unsupported.go similarity index 90% rename from vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go rename to vendor/github.com/containers/podman/v5/libpod/events/events_unsupported.go index 01031c225..e1be1b015 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/events_unsupported.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/events_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package events diff --git a/vendor/github.com/containers/podman/v4/libpod/events/filters.go b/vendor/github.com/containers/podman/v5/libpod/events/filters.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/events/filters.go rename to vendor/github.com/containers/podman/v5/libpod/events/filters.go index 6d08dc4ed..e502546be 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/filters.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/filters.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/pkg/util" ) func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error) { diff --git a/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go b/vendor/github.com/containers/podman/v5/libpod/events/journal_linux.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go rename to vendor/github.com/containers/podman/v5/libpod/events/journal_linux.go index 
debb49a8d..2ee94090f 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/journal_linux.go @@ -1,5 +1,4 @@ //go:build systemd -// +build systemd package events @@ -11,8 +10,8 @@ import ( "strconv" "time" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/util" "github.com/coreos/go-systemd/v22/journal" "github.com/coreos/go-systemd/v22/sdjournal" "github.com/sirupsen/logrus" @@ -44,12 +43,15 @@ func (e EventJournalD) Write(ee Event) error { case Image: m["PODMAN_NAME"] = ee.Name m["PODMAN_ID"] = ee.ID + if ee.Error != "" { + m["ERROR"] = ee.Error + } case Container, Pod: m["PODMAN_IMAGE"] = ee.Image m["PODMAN_NAME"] = ee.Name m["PODMAN_ID"] = ee.ID - if ee.ContainerExitCode != 0 { - m["PODMAN_EXIT_CODE"] = strconv.Itoa(ee.ContainerExitCode) + if ee.ContainerExitCode != nil { + m["PODMAN_EXIT_CODE"] = strconv.Itoa(*ee.ContainerExitCode) } if ee.PodID != "" { m["PODMAN_POD_ID"] = ee.PodID @@ -206,7 +208,7 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { if err != nil { logrus.Errorf("Parsing event exit code %s", code) } else { - newEvent.ContainerExitCode = intCode + newEvent.ContainerExitCode = &intCode } } @@ -229,6 +231,9 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { newEvent.Network = entry.Fields["PODMAN_NETWORK_NAME"] case Image: newEvent.ID = entry.Fields["PODMAN_ID"] + if val, ok := entry.Fields["ERROR"]; ok { + newEvent.Error = val + } } return &newEvent, nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/events/journal_unsupported.go b/vendor/github.com/containers/podman/v5/libpod/events/journal_unsupported.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/events/journal_unsupported.go rename to vendor/github.com/containers/podman/v5/libpod/events/journal_unsupported.go index 6ed39792b..5cbd75acd 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/journal_unsupported.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/journal_unsupported.go @@ -1,5 +1,4 @@ //go:build !systemd -// +build !systemd package events diff --git a/vendor/github.com/containers/podman/v4/libpod/events/logfile.go b/vendor/github.com/containers/podman/v5/libpod/events/logfile.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/events/logfile.go rename to vendor/github.com/containers/podman/v5/libpod/events/logfile.go index 17c93e960..9b69d15f6 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/logfile.go +++ b/vendor/github.com/containers/podman/v5/libpod/events/logfile.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package events @@ -14,7 +13,7 @@ import ( "path/filepath" "time" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage/pkg/lockfile" "github.com/nxadm/tail" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/podman/v4/libpod/events/memory.go b/vendor/github.com/containers/podman/v5/libpod/events/memory.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/events/memory.go rename to vendor/github.com/containers/podman/v5/libpod/events/memory.go diff --git a/vendor/github.com/containers/podman/v4/libpod/events/nullout.go 
b/vendor/github.com/containers/podman/v5/libpod/events/nullout.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/events/nullout.go rename to vendor/github.com/containers/podman/v5/libpod/events/nullout.go diff --git a/vendor/github.com/containers/podman/v4/libpod/healthcheck.go b/vendor/github.com/containers/podman/v5/libpod/healthcheck.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/healthcheck.go rename to vendor/github.com/containers/podman/v5/libpod/healthcheck.go index f906562bc..47ddc0494 100644 --- a/vendor/github.com/containers/podman/v4/libpod/healthcheck.go +++ b/vendor/github.com/containers/podman/v5/libpod/healthcheck.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -13,8 +12,8 @@ import ( "strings" "time" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -167,7 +166,7 @@ func (c *Container) runHealthCheck(ctx context.Context, isStartup bool) (define. } hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog) - logStatus, err := c.updateHealthCheckLog(hcl, inStartPeriod) + logStatus, err := c.updateHealthCheckLog(hcl, inStartPeriod, isStartup) if err != nil { return hcResult, "", fmt.Errorf("unable to update health check log %s for %s: %w", c.healthCheckLogPath(), c.ID(), err) } @@ -375,10 +374,17 @@ func (c *Container) isUnhealthy() (bool, error) { } // UpdateHealthCheckLog parses the health check results and writes the log -func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPeriod bool) (string, error) { +func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPeriod, isStartup bool) (string, error) { c.lock.Lock() defer c.lock.Unlock() + // If we are playing a kube yaml then let's honor the start period time for + // both failing and succeeding cases to match kube behavior. 
+ // So don't update the health check log till the start period is over + if _, ok := c.config.Spec.Annotations[define.KubeHealthCheckAnnotation]; ok && inStartPeriod && !isStartup { + return "", nil + } + healthCheck, err := c.getHealthCheckLog() if err != nil { return "", err diff --git a/vendor/github.com/containers/podman/v4/libpod/healthcheck_linux.go b/vendor/github.com/containers/podman/v5/libpod/healthcheck_linux.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/healthcheck_linux.go rename to vendor/github.com/containers/podman/v5/libpod/healthcheck_linux.go index 53ec0c1de..b7b66a939 100644 --- a/vendor/github.com/containers/podman/v4/libpod/healthcheck_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/healthcheck_linux.go @@ -1,5 +1,4 @@ //go:build !remote && systemd -// +build !remote,systemd package libpod @@ -10,10 +9,10 @@ import ( "os/exec" "strings" - "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/systemd" - "github.com/containers/podman/v4/utils" + systemdCommon "github.com/containers/common/pkg/systemd" + "github.com/containers/podman/v5/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/systemd" "github.com/sirupsen/logrus" ) @@ -138,7 +137,7 @@ func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool) er } func (c *Container) disableHealthCheckSystemd(isStartup bool) bool { - if !utils.RunsOnSystemd() || os.Getenv("DISABLE_HC_SYSTEMD") == "true" { + if !systemdCommon.RunsOnSystemd() || os.Getenv("DISABLE_HC_SYSTEMD") == "true" { return true } if isStartup { diff --git a/vendor/github.com/containers/podman/v4/libpod/healthcheck_nosystemd_linux.go b/vendor/github.com/containers/podman/v5/libpod/healthcheck_nosystemd_linux.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/healthcheck_nosystemd_linux.go rename to vendor/github.com/containers/podman/v5/libpod/healthcheck_nosystemd_linux.go index cde5f0c10..cd8503f82 100644 --- a/vendor/github.com/containers/podman/v4/libpod/healthcheck_nosystemd_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/healthcheck_nosystemd_linux.go @@ -1,5 +1,4 @@ //go:build !remote && !systemd -// +build !remote,!systemd package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/healthcheck_unsupported.go b/vendor/github.com/containers/podman/v5/libpod/healthcheck_unsupported.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/healthcheck_unsupported.go rename to vendor/github.com/containers/podman/v5/libpod/healthcheck_unsupported.go index 5e2c151f4..0517465db 100644 --- a/vendor/github.com/containers/podman/v4/libpod/healthcheck_unsupported.go +++ b/vendor/github.com/containers/podman/v5/libpod/healthcheck_unsupported.go @@ -1,5 +1,4 @@ //go:build !remote && !linux -// +build !remote,!linux package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/info.go b/vendor/github.com/containers/podman/v5/libpod/info.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/info.go rename to vendor/github.com/containers/podman/v5/libpod/info.go index 8f00dbdeb..140b535ef 100644 --- a/vendor/github.com/containers/podman/v4/libpod/info.go +++ b/vendor/github.com/containers/podman/v5/libpod/info.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -20,9 +19,8 @@ import ( 
"github.com/containers/buildah/pkg/util" "github.com/containers/common/pkg/version" "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/linkmode" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/linkmode" "github.com/containers/storage" "github.com/containers/storage/pkg/system" "github.com/sirupsen/logrus" @@ -214,7 +212,7 @@ func (r *Runtime) getContainerStoreInfo() (define.ContainerStore, error) { // top-level "store" info func (r *Runtime) storeInfo() (*define.StoreInfo, error) { // let's say storage driver in use, number of images, number of containers - configFile, err := storage.DefaultConfigFile(rootless.IsRootless()) + configFile, err := storage.DefaultConfigFile() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v4/libpod/info_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/info_freebsd.go similarity index 93% rename from vendor/github.com/containers/podman/v4/libpod/info_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/info_freebsd.go index 19b4257e3..2feab40dd 100644 --- a/vendor/github.com/containers/podman/v4/libpod/info_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/info_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -7,7 +6,7 @@ import ( "fmt" "unsafe" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/info_linux.go b/vendor/github.com/containers/podman/v5/libpod/info_linux.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/info_linux.go rename to vendor/github.com/containers/podman/v5/libpod/info_linux.go index e01510857..ee34c8e20 100644 --- a/vendor/github.com/containers/podman/v4/libpod/info_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/info_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -17,8 +16,8 @@ import ( "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/seccomp" "github.com/containers/common/pkg/version" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/kube.go b/vendor/github.com/containers/podman/v5/libpod/kube.go similarity index 92% rename from vendor/github.com/containers/podman/v4/libpod/kube.go rename to vendor/github.com/containers/podman/v5/libpod/kube.go index ba445e7c1..cd57763f4 100644 --- a/vendor/github.com/containers/podman/v4/libpod/kube.go +++ b/vendor/github.com/containers/podman/v5/libpod/kube.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -14,37 +13,36 @@ import ( "strconv" "strings" "time" - "unicode/utf8" "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - cutil "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/annotations" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/env" - v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1" - 
"github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource" - v12 "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr" - "github.com/containers/podman/v4/pkg/lookup" - "github.com/containers/podman/v4/pkg/namespaces" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/annotations" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/env" + v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" + "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource" + v12 "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/util/intstr" + "github.com/containers/podman/v5/pkg/lookup" + "github.com/containers/podman/v5/pkg/namespaces" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // GenerateForKube takes a slice of libpod containers and generates // one v1.Pod description that includes just a single container. -func GenerateForKube(ctx context.Context, ctrs []*Container, getService, useLongAnnotations, podmanOnly bool) (*v1.Pod, error) { +func GenerateForKube(ctx context.Context, ctrs []*Container, getService, podmanOnly bool) (*v1.Pod, error) { // Generate the v1.Pod yaml description - return simplePodWithV1Containers(ctx, ctrs, getService, useLongAnnotations, podmanOnly) + return simplePodWithV1Containers(ctx, ctrs, getService, podmanOnly) } // GenerateForKube takes a slice of libpod containers and generates // one v1.Pod description -func (p *Pod) GenerateForKube(ctx context.Context, getService, useLongAnnotations, podmanOnly bool) (*v1.Pod, []v1.ServicePort, error) { +func (p *Pod) GenerateForKube(ctx context.Context, getService, podmanOnly bool) (*v1.Pod, []v1.ServicePort, error) { // Generate the v1.Pod yaml description var ( ports []v1.ContainerPort @@ -74,13 +72,13 @@ func (p *Pod) GenerateForKube(ctx context.Context, getService, useLongAnnotation return nil, servicePorts, err } for _, host := range infraContainer.config.ContainerNetworkConfig.HostAdd { - hostSli := strings.SplitN(host, ":", 2) - if len(hostSli) != 2 { + hostname, ip, hasIP := strings.Cut(host, ":") + if !hasIP { return nil, servicePorts, errors.New("invalid hostAdd") } extraHost = append(extraHost, v1.HostAlias{ - IP: hostSli[1], - Hostnames: []string{hostSli[0]}, + IP: ip, + Hostnames: []string{hostname}, }) } ports, err = portMappingToContainerPort(infraContainer.config.PortMappings, getService) @@ -96,7 +94,7 @@ func (p *Pod) GenerateForKube(ctx context.Context, getService, useLongAnnotation hostUsers = infraContainer.IDMappings().HostUIDMapping && infraContainer.IDMappings().HostGIDMapping infraName = infraContainer.config.Name } - pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork, hostUsers, getService, useLongAnnotations, podmanOnly, infraName) + pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork, hostUsers, getService, podmanOnly, infraName) if err != nil { return nil, servicePorts, err } @@ -452,16 +450,6 @@ func newServicePortState() servicePortState { } } -func truncateKubeAnnotation(str string, useLongAnnotations bool) string { - str = 
strings.TrimSpace(str) - if useLongAnnotations || utf8.RuneCountInString(str) < define.MaxKubeAnnotation { - return str - } - trunc := string([]rune(str)[:define.MaxKubeAnnotation]) - logrus.Warnf("Truncation Annotation: %q to %q: Kubernetes only allows %d characters", str, trunc, define.MaxKubeAnnotation) - return trunc -} - // containerPortsToServicePorts takes a slice of containerports and generates a // slice of service ports func (state *servicePortState) containerPortsToServicePorts(containerPorts []v1.ContainerPort) ([]v1.ServicePort, error) { @@ -508,7 +496,7 @@ func containersToServicePorts(containers []v1.Container) ([]v1.ServicePort, erro return sps, nil } -func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork, hostUsers, getService, useLongAnnotations, podmanOnly bool, infraName string) (*v1.Pod, error) { +func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork, hostUsers, getService, podmanOnly bool, infraName string) (*v1.Pod, error) { deDupPodVolumes := make(map[string]*v1.Volume) first := true podContainers := make([]v1.Container, 0, len(containers)) @@ -530,11 +518,11 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po if !podmanOnly && (define.IsReservedAnnotation(k) || annotations.IsReservedAnnotation(k)) { continue } - podAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = truncateKubeAnnotation(v, useLongAnnotations) + podAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = v } // Convert auto-update labels into kube annotations - for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels(), useLongAnnotations) { - podAnnotations[k] = truncateKubeAnnotation(v, useLongAnnotations) + for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels()) { + podAnnotations[k] = v } isInit := ctr.IsInitCtr() // Since hostname is only set at pod level, set the hostname to the hostname of the first container we encounter @@ -557,7 +545,7 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po return nil, err } for k, v := range annotations { - podAnnotations[define.BindMountPrefix] = truncateKubeAnnotation(k+":"+v, useLongAnnotations) + podAnnotations[define.BindMountPrefix] = k + ":" + v } // Since port bindings for the pod are handled by the // infra container, wipe them here only if we are sharing the net namespace @@ -606,7 +594,7 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po // If the infraName is not the podID-infra, that means the user set another infra name using // --infra-name during pod creation if infraName != "" && infraName != p.ID()[:12]+"-infra" { - podAnnotations[define.InfraNameAnnotation] = truncateKubeAnnotation(infraName, useLongAnnotations) + podAnnotations[define.InfraNameAnnotation] = infraName } } } @@ -675,7 +663,7 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta // simplePodWithV1Containers is a function used by inspect when kube yaml needs to be generated // for a single container. we "insert" that container description in a pod. 
-func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getService, useLongAnnotations, podmanOnly bool) (*v1.Pod, error) { +func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getService, podmanOnly bool) (*v1.Pod, error) { kubeCtrs := make([]v1.Container, 0, len(ctrs)) kubeInitCtrs := []v1.Container{} kubeVolumes := make([]v1.Volume, 0) @@ -695,12 +683,12 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic if !podmanOnly && (define.IsReservedAnnotation(k) || annotations.IsReservedAnnotation(k)) { continue } - kubeAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = truncateKubeAnnotation(v, useLongAnnotations) + kubeAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = v } // Convert auto-update labels into kube annotations - for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels(), useLongAnnotations) { - kubeAnnotations[k] = truncateKubeAnnotation(v, useLongAnnotations) + for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels()) { + kubeAnnotations[k] = v } isInit := ctr.IsInitCtr() @@ -730,7 +718,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic for _, ulimit := range ctr.config.Spec.Process.Rlimits { finalUlimit := strings.ToLower(strings.ReplaceAll(ulimit.Type, "RLIMIT_", "")) + "=" + strconv.Itoa(int(ulimit.Soft)) + ":" + strconv.Itoa(int(ulimit.Hard)) // compare ulimit with default list so we don't add it twice - if cutil.StringInSlice(finalUlimit, defaultUlimits) { + if slices.Contains(defaultUlimits, finalUlimit) { continue } @@ -753,7 +741,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic return nil, err } for k, v := range annotations { - kubeAnnotations[define.BindMountPrefix] = truncateKubeAnnotation(k+":"+v, useLongAnnotations) + kubeAnnotations[define.BindMountPrefix] = k + ":" + v } if isInit { kubeInitCtrs = append(kubeInitCtrs, kubeCtr) @@ -769,7 +757,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic podDNS.Nameservers = make([]string, 0) } for _, s := range servers { - if !cutil.StringInSlice(s, podDNS.Nameservers) { // only append if it does not exist + if !slices.Contains(podDNS.Nameservers, s) { // only append if it does not exist podDNS.Nameservers = append(podDNS.Nameservers, s) } } @@ -780,7 +768,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic podDNS.Searches = make([]string, 0) } for _, d := range domains { - if !cutil.StringInSlice(d, podDNS.Searches) { // only append if it does not exist + if !slices.Contains(podDNS.Searches, d) { // only append if it does not exist podDNS.Searches = append(podDNS.Searches, d) } } @@ -797,7 +785,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic podName := removeUnderscores(ctrs[0].Name()) // Check if the pod name and container name will end up conflicting // Append -pod if so - if cutil.StringInSlice(podName, ctrNames) { + if slices.Contains(ctrNames, podName) { podName += "-pod" } @@ -1002,10 +990,10 @@ func containerToV1Container(ctx context.Context, c *Container, getService bool) dnsOptions := make([]v1.PodDNSConfigOption, 0) for _, option := range options { // the option can be "k:v" or just "k", no delimiter is required - opts := strings.SplitN(option, ":", 2) + name, value, _ := strings.Cut(option, ":") dnsOpt := v1.PodDNSConfigOption{ - Name: opts[0], - Value: &opts[1], + Name: name, + Value: &value, } dnsOptions = 
append(dnsOptions, dnsOpt) } @@ -1056,23 +1044,23 @@ func libpodEnvVarsToKubeEnvVars(envs []string, imageEnvs []string) ([]v1.EnvVar, envVars := make([]v1.EnvVar, 0, len(envs)) imageMap := make(map[string]string, len(imageEnvs)) for _, ie := range imageEnvs { - split := strings.SplitN(ie, "=", 2) - imageMap[split[0]] = split[1] + key, val, _ := strings.Cut(ie, "=") + imageMap[key] = val } for _, e := range envs { - split := strings.SplitN(e, "=", 2) - if len(split) != 2 { + envName, envValue, hasValue := strings.Cut(e, "=") + if !hasValue { return envVars, fmt.Errorf("environment variable %s is malformed; should be key=value", e) } - if defaultEnv[split[0]] == split[1] { + if defaultEnv[envName] == envValue { continue } - if imageMap[split[0]] == split[1] { + if imageMap[envName] == envValue { continue } ev := v1.EnvVar{ - Name: split[0], - Value: split[1], + Name: envName, + Value: envValue, } envVars = append(envVars, ev) } @@ -1115,7 +1103,7 @@ func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume // generateKubePersistentVolumeClaim converts a ContainerNamedVolume to a Kubernetes PersistentVolumeClaim func generateKubePersistentVolumeClaim(v *ContainerNamedVolume) (v1.VolumeMount, v1.Volume) { - ro := cutil.StringInSlice("ro", v.Options) + ro := slices.Contains(v.Options, "ro") // To avoid naming conflicts with any host path mounts, add a unique suffix to the volume's name. name := v.Name + "-pvc" @@ -1176,7 +1164,7 @@ func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) { } vm.Name = name vm.MountPath = m.Destination - if cutil.StringInSlice("ro", m.Options) { + if slices.Contains(m.Options, "ro") { vm.ReadOnly = true } @@ -1217,7 +1205,7 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v // Find caps in the defaultCaps but not in the container's // those indicate a dropped cap for _, capability := range defaultCaps { - if !cutil.StringInSlice(capability, containerCaps) { + if !slices.Contains(containerCaps, capability) { if _, ok := dedupDrop[capability]; !ok { drop = append(drop, v1.Capability(capability)) dedupDrop[capability] = true @@ -1227,7 +1215,7 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v // Find caps in the container but not in the defaults; those indicate // an added cap for _, capability := range containerCaps { - if !cutil.StringInSlice(capability, defaultCaps) { + if !slices.Contains(defaultCaps, capability) { if _, ok := dedupAdd[capability]; !ok { add = append(add, v1.Capability(capability)) dedupAdd[capability] = true @@ -1287,25 +1275,22 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, bool, error var selinuxOpts v1.SELinuxOptions selinuxHasData := false for _, label := range strings.Split(c.config.Spec.Annotations[define.InspectAnnotationLabel], ",label=") { - opts := strings.SplitN(label, ":", 2) - switch len(opts) { - case 2: - switch opts[0] { + opt, val, hasVal := strings.Cut(label, ":") + if hasVal { + switch opt { case "filetype": - selinuxOpts.FileType = opts[1] + selinuxOpts.FileType = val selinuxHasData = true case "type": - selinuxOpts.Type = opts[1] + selinuxOpts.Type = val selinuxHasData = true case "level": - selinuxOpts.Level = opts[1] - selinuxHasData = true - } - case 1: - if opts[0] == "disable" { - selinuxOpts.Type = "spc_t" + selinuxOpts.Level = val selinuxHasData = true } + } else if opt == "disable" { + selinuxOpts.Type = "spc_t" + selinuxHasData = true } } if selinuxHasData { @@ -1388,7 +1373,7 @@ 
func removeUnderscores(s string) string { // getAutoUpdateAnnotations searches for auto-update container labels // and returns them as kube annotations -func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string, useLongAnnotations bool) map[string]string { +func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string) map[string]string { autoUpdateLabel := "io.containers.autoupdate" annotations := make(map[string]string) @@ -1398,7 +1383,7 @@ func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string, useLo // since labels can variate between containers within a pod, they will be // identified with the container name when converted into kube annotations kc := fmt.Sprintf("%s/%s", k, ctrName) - annotations[kc] = truncateKubeAnnotation(v, useLongAnnotations) + annotations[kc] = v } } diff --git a/vendor/github.com/containers/podman/v4/libpod/layers/layer.go b/vendor/github.com/containers/podman/v5/libpod/layers/layer.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/layers/layer.go rename to vendor/github.com/containers/podman/v5/libpod/layers/layer.go diff --git a/vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_dynamic.go b/vendor/github.com/containers/podman/v5/libpod/linkmode/linkmode_dynamic.go similarity index 89% rename from vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_dynamic.go rename to vendor/github.com/containers/podman/v5/libpod/linkmode/linkmode_dynamic.go index f020fa53e..81b222e5c 100644 --- a/vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_dynamic.go +++ b/vendor/github.com/containers/podman/v5/libpod/linkmode/linkmode_dynamic.go @@ -1,5 +1,4 @@ //go:build !static -// +build !static package linkmode diff --git a/vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_static.go b/vendor/github.com/containers/podman/v5/libpod/linkmode/linkmode_static.go similarity index 89% rename from vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_static.go rename to vendor/github.com/containers/podman/v5/libpod/linkmode/linkmode_static.go index b181ad285..bc2a72200 100644 --- a/vendor/github.com/containers/podman/v4/libpod/linkmode/linkmode_static.go +++ b/vendor/github.com/containers/podman/v5/libpod/linkmode/linkmode_static.go @@ -1,5 +1,4 @@ //go:build static -// +build static package linkmode diff --git a/vendor/github.com/containers/podman/v4/libpod/lock/file/file_lock.go b/vendor/github.com/containers/podman/v5/libpod/lock/file/file_lock.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/lock/file/file_lock.go rename to vendor/github.com/containers/podman/v5/libpod/lock/file/file_lock.go diff --git a/vendor/github.com/containers/podman/v4/libpod/lock/file_lock_manager.go b/vendor/github.com/containers/podman/v5/libpod/lock/file_lock_manager.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/lock/file_lock_manager.go rename to vendor/github.com/containers/podman/v5/libpod/lock/file_lock_manager.go index 4a82e687c..0f6757a6d 100644 --- a/vendor/github.com/containers/podman/v4/libpod/lock/file_lock_manager.go +++ b/vendor/github.com/containers/podman/v5/libpod/lock/file_lock_manager.go @@ -1,8 +1,8 @@ package lock import ( - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/lock/file" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/lock/file" ) // FileLockManager manages shared memory locks. 
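
The kube.go hunks above follow two mechanical modernizations that recur throughout this diff: the strings.SplitN(s, sep, 2) plus index-juggling pattern becomes strings.Cut, and the deprecated containers/common StringInSlice helper becomes slices.Contains. A small self-contained illustration of the equivalences (the example values are made up):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices" // the import the vendored code uses above
)

func main() {
	// strings.Cut returns both halves plus a boolean reporting whether the
	// separator was found, removing the out-of-range hazard of opts[1]
	// when SplitN produced fewer than two elements.
	name, value, ok := strings.Cut("ndots:2", ":")
	fmt.Println(name, value, ok) // ndots 2 true

	// slices.Contains is the drop-in replacement for cutil.StringInSlice.
	defaultUlimits := []string{"nofile=1024:2048"}
	fmt.Println(slices.Contains(defaultUlimits, "nofile=1024:2048")) // true
}
```
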
diff --git a/vendor/github.com/containers/podman/v4/libpod/lock/in_memory_locks.go b/vendor/github.com/containers/podman/v5/libpod/lock/in_memory_locks.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/lock/in_memory_locks.go rename to vendor/github.com/containers/podman/v5/libpod/lock/in_memory_locks.go diff --git a/vendor/github.com/containers/podman/v4/libpod/lock/lock.go b/vendor/github.com/containers/podman/v5/libpod/lock/lock.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/lock/lock.go rename to vendor/github.com/containers/podman/v5/libpod/lock/lock.go diff --git a/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.c b/vendor/github.com/containers/podman/v5/libpod/lock/shm/shm_lock.c similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.c rename to vendor/github.com/containers/podman/v5/libpod/lock/shm/shm_lock.c diff --git a/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.go b/vendor/github.com/containers/podman/v5/libpod/lock/shm/shm_lock.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.go rename to vendor/github.com/containers/podman/v5/libpod/lock/shm/shm_lock.go index 239d0e47a..24031a8ca 100644 --- a/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.go +++ b/vendor/github.com/containers/podman/v5/libpod/lock/shm/shm_lock.go @@ -1,6 +1,4 @@ //go:build (linux || freebsd) && cgo -// +build linux freebsd -// +build cgo package shm diff --git a/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.h b/vendor/github.com/containers/podman/v5/libpod/lock/shm/shm_lock.h similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock.h rename to vendor/github.com/containers/podman/v5/libpod/lock/shm/shm_lock.h diff --git a/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock_nocgo.go b/vendor/github.com/containers/podman/v5/libpod/lock/shm/shm_lock_nocgo.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock_nocgo.go rename to vendor/github.com/containers/podman/v5/libpod/lock/shm/shm_lock_nocgo.go index 7e0ccd61f..604007938 100644 --- a/vendor/github.com/containers/podman/v4/libpod/lock/shm/shm_lock_nocgo.go +++ b/vendor/github.com/containers/podman/v5/libpod/lock/shm/shm_lock_nocgo.go @@ -1,5 +1,4 @@ //go:build linux && !cgo -// +build linux,!cgo package shm diff --git a/vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_linux.go b/vendor/github.com/containers/podman/v5/libpod/lock/shm_lock_manager_linux.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_linux.go rename to vendor/github.com/containers/podman/v5/libpod/lock/shm_lock_manager_linux.go index 344183eb5..ba438b9c6 100644 --- a/vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/lock/shm_lock_manager_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package lock @@ -7,7 +6,7 @@ import ( "fmt" "syscall" - "github.com/containers/podman/v4/libpod/lock/shm" + "github.com/containers/podman/v5/libpod/lock/shm" ) // SHMLockManager manages shared memory locks. 
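
The bulk of the surrounding hunks are pure renames: Go encodes a module's major version in its import path, so the move from podman/v4 to podman/v5 rewrites every vendored path even where file contents are byte-identical (hence the "similarity index 100%" entries). For a downstream consumer the change is equally mechanical; a minimal sketch, assuming go.mod already requires github.com/containers/podman/v5:

```go
package main

import (
	// was: "github.com/containers/podman/v4/libpod/define"
	"github.com/containers/podman/v5/libpod/define"
)

func main() {
	// Package names and identifiers are unchanged by the path bump; only
	// the hunks shown elsewhere in this diff alter actual APIs.
	println(define.ErrNoSuchCtr.Error())
}
```
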
diff --git a/vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_unsupported.go b/vendor/github.com/containers/podman/v5/libpod/lock/shm_lock_manager_unsupported.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_unsupported.go rename to vendor/github.com/containers/podman/v5/libpod/lock/shm_lock_manager_unsupported.go index 9ca01d57d..88e25467d 100644 --- a/vendor/github.com/containers/podman/v4/libpod/lock/shm_lock_manager_unsupported.go +++ b/vendor/github.com/containers/podman/v5/libpod/lock/shm_lock_manager_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package lock diff --git a/vendor/github.com/containers/podman/v4/libpod/logs/log.go b/vendor/github.com/containers/podman/v5/libpod/logs/log.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/logs/log.go rename to vendor/github.com/containers/podman/v5/libpod/logs/log.go index 4229501cf..e6ad32f99 100644 --- a/vendor/github.com/containers/podman/v4/libpod/logs/log.go +++ b/vendor/github.com/containers/podman/v5/libpod/logs/log.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/containers/podman/v4/libpod/logs/reversereader" + "github.com/containers/podman/v5/libpod/logs/reversereader" "github.com/nxadm/tail" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/logs/reversereader/reversereader.go b/vendor/github.com/containers/podman/v5/libpod/logs/reversereader/reversereader.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/logs/reversereader/reversereader.go rename to vendor/github.com/containers/podman/v5/libpod/logs/reversereader/reversereader.go diff --git a/vendor/github.com/containers/podman/v4/libpod/mounts_linux.go b/vendor/github.com/containers/podman/v5/libpod/mounts_linux.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/mounts_linux.go rename to vendor/github.com/containers/podman/v5/libpod/mounts_linux.go index a30136f8a..10dbdcb15 100644 --- a/vendor/github.com/containers/podman/v4/libpod/mounts_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/mounts_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_common.go b/vendor/github.com/containers/podman/v5/libpod/networking_common.go similarity index 93% rename from vendor/github.com/containers/podman/v4/libpod/networking_common.go rename to vendor/github.com/containers/podman/v5/libpod/networking_common.go index 4e46671fc..5756b71f1 100644 --- a/vendor/github.com/containers/podman/v4/libpod/networking_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/networking_common.go @@ -1,6 +1,4 @@ //go:build !remote && (linux || freebsd) -// +build !remote -// +build linux freebsd package libpod @@ -14,13 +12,13 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/machine" - "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/namespaces" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/namespaces" + "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/storage/pkg/lockfile" 
"github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // convertPortMappings will remove the HostIP part from the ports when running inside podman machine. @@ -65,24 +63,7 @@ func (c *Container) getNetworkOptions(networkOpts map[string]types.PerNetworkOpt // setUpNetwork will set up the networks, on error it will also tear down the cni // networks. If rootless it will join/create the rootless network namespace. func (r *Runtime) setUpNetwork(ns string, opts types.NetworkOptions) (map[string]types.StatusBlock, error) { - rootlessNetNS, err := r.GetRootlessNetNs(true) - if err != nil { - return nil, err - } - var results map[string]types.StatusBlock - setUpPod := func() error { - results, err = r.network.Setup(ns, types.SetupOptions{NetworkOptions: opts}) - return err - } - // rootlessNetNS is nil if we are root - if rootlessNetNS != nil { - // execute the setup in the rootless net ns - err = rootlessNetNS.Do(setUpPod) - rootlessNetNS.Lock.Unlock() - } else { - err = setUpPod() - } - return results, err + return r.network.Setup(ns, types.SetupOptions{NetworkOptions: opts}) } // getNetworkPodName return the pod name (hostname) used by dns backend. @@ -100,29 +81,7 @@ func getNetworkPodName(c *Container) string { // Tear down a container's network configuration and joins the // rootless net ns as rootless user func (r *Runtime) teardownNetworkBackend(ns string, opts types.NetworkOptions) error { - rootlessNetNS, err := r.GetRootlessNetNs(false) - if err != nil { - return err - } - tearDownPod := func() error { - if err := r.network.Teardown(ns, types.TeardownOptions{NetworkOptions: opts}); err != nil { - return fmt.Errorf("tearing down network namespace configuration for container %s: %w", opts.ContainerID, err) - } - return nil - } - - // rootlessNetNS is nil if we are root - if rootlessNetNS != nil { - // execute the network setup in the rootless net ns - err = rootlessNetNS.Do(tearDownPod) - if cerr := rootlessNetNS.Cleanup(r); cerr != nil { - logrus.WithError(cerr).Error("failed to clean up rootless netns") - } - rootlessNetNS.Lock.Unlock() - } else { - err = tearDownPod() - } - return err + return r.network.Teardown(ns, types.TeardownOptions{NetworkOptions: opts}) } // Tear down a container's network backend configuration, but do not tear down the @@ -394,7 +353,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro // check if network exists and if the input is an ID we get the name // CNI and netavark and the libpod db only uses names so it is important that we only use the name - netName, err = c.runtime.normalizeNetworkName(netName) + netName, _, err = c.runtime.normalizeNetworkName(netName) if err != nil { return err } @@ -508,7 +467,8 @@ func (c *Container) NetworkConnect(nameOrID, netName string, netOpts types.PerNe // check if network exists and if the input is an ID we get the name // CNI and netavark and the libpod db only uses names so it is important that we only use the name - netName, err = c.runtime.normalizeNetworkName(netName) + var nicName string + netName, nicName, err = c.runtime.normalizeNetworkName(netName) if err != nil { return err } @@ -522,6 +482,13 @@ func (c *Container) NetworkConnect(nameOrID, netName string, netOpts types.PerNe netOpts.Aliases = append(netOpts.Aliases, getExtraNetworkAliases(c)...) 
+ // check whether interface is to be named as the network_interface + // when name left unspecified + if netOpts.InterfaceName == "" { + netOpts.InterfaceName = nicName + } + + // set default interface name if netOpts.InterfaceName == "" { netOpts.InterfaceName = getFreeInterfaceName(networks) if netOpts.InterfaceName == "" { @@ -638,7 +605,7 @@ func getFreeInterfaceName(networks map[string]types.PerNetworkOptions) string { } for i := 0; i < 100000; i++ { ifName := fmt.Sprintf("eth%d", i) - if !util.StringInSlice(ifName, ifNames) { + if !slices.Contains(ifNames, ifName) { return ifName } } @@ -673,14 +640,24 @@ func (r *Runtime) ConnectContainerToNetwork(nameOrID, netName string, netOpts ty return ctr.NetworkConnect(nameOrID, netName, netOpts) } -// normalizeNetworkName takes a network name, a partial or a full network ID and returns the network name. +// normalizeNetworkName takes a network name, a partial or a full network ID and +// returns: 1) the network name and 2) the network_interface name for macvlan +// and ipvlan drivers if the naming pattern is "device" defined in the +// containers.conf file. Else, "". // If the network is not found an error is returned. -func (r *Runtime) normalizeNetworkName(nameOrID string) (string, error) { +func (r *Runtime) normalizeNetworkName(nameOrID string) (string, string, error) { net, err := r.network.NetworkInspect(nameOrID) if err != nil { - return "", err + return "", "", err } - return net.Name, nil + + netIface := "" + namingPattern := r.config.Containers.InterfaceName + if namingPattern == "device" && (net.Driver == types.MacVLANNetworkDriver || net.Driver == types.IPVLANNetworkDriver) { + netIface = net.NetworkInterface + } + + return net.Name, netIface, nil } // ocicniPortsToNetTypesPorts convert the old port format to the new one diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/networking_freebsd.go similarity index 76% rename from vendor/github.com/containers/podman/v4/libpod/networking_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/networking_freebsd.go index 75cb4fc80..2d5c5e488 100644 --- a/vendor/github.com/containers/podman/v4/libpod/networking_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/networking_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -14,6 +13,7 @@ import ( "github.com/containers/buildah/pkg/jail" "github.com/containers/common/libnetwork/types" + "github.com/containers/podman/v5/libpod/define" "github.com/containers/storage/pkg/lockfile" "github.com/sirupsen/logrus" ) @@ -46,33 +46,6 @@ type NetstatAddress struct { Collisions uint64 `json:"collisions"` } -// copied from github.com/vishvanada/netlink which does not build on freebsd -type LinkStatistics64 struct { - RxPackets uint64 - TxPackets uint64 - RxBytes uint64 - TxBytes uint64 - RxErrors uint64 - TxErrors uint64 - RxDropped uint64 - TxDropped uint64 - Multicast uint64 - Collisions uint64 - RxLengthErrors uint64 - RxOverErrors uint64 - RxCrcErrors uint64 - RxFrameErrors uint64 - RxFifoErrors uint64 - RxMissedErrors uint64 - TxAbortedErrors uint64 - TxCarrierErrors uint64 - TxFifoErrors uint64 - TxHeartbeatErrors uint64 - TxWindowErrors uint64 - RxCompressed uint64 - TxCompressed uint64 -} - type RootlessNetNS struct { dir string Lock *lockfile.LockFile @@ -109,10 +82,14 @@ func getSlirp4netnsIP(subnet *net.IPNet) (*net.IP, error) { return nil, errors.New("not implemented GetSlirp4netnsIP") } -// While there is 
code in container_internal.go which calls this, in -// my testing network creation always seems to go through createNetNS. +// This is called after the container's jail is created but before its +// started. We can use this to initialise the container's vnet when we don't +// have a separate vnet jail (which is the case in FreeBSD 13.3 and later). func (r *Runtime) setupNetNS(ctr *Container) error { - return errors.New("not implemented (*Runtime) setupNetNS") + networkStatus, err := r.configureNetNS(ctr, ctr.ID()) + ctr.state.NetNS = ctr.ID() + ctr.state.NetworkStatus = networkStatus + return err } // Create and configure a new network namespace for a container @@ -197,28 +174,30 @@ func (r *Runtime) teardownNetNS(ctr *Container) error { } if ctr.state.NetNS != "" { - // Rather than destroying the jail immediately, reset the - // persist flag so that it will live until the container is - // done. - netjail, err := jail.FindByName(ctr.state.NetNS) - if err != nil { - return fmt.Errorf("finding network jail %s: %w", ctr.state.NetNS, err) - } - jconf := jail.NewConfig() - jconf.Set("persist", false) - if err := netjail.Set(jconf); err != nil { - return fmt.Errorf("releasing network jail %s: %w", ctr.state.NetNS, err) + // If PostConfigureNetNS is false, then we are running with a + // separate vnet jail so we need to clean that up now. + if !ctr.config.PostConfigureNetNS { + // Rather than destroying the jail immediately, reset the + // persist flag so that it will live until the container is + // done. + netjail, err := jail.FindByName(ctr.state.NetNS) + if err != nil { + return fmt.Errorf("finding network jail %s: %w", ctr.state.NetNS, err) + } + jconf := jail.NewConfig() + jconf.Set("persist", false) + if err := netjail.Set(jconf); err != nil { + return fmt.Errorf("releasing network jail %s: %w", ctr.state.NetNS, err) + } } - ctr.state.NetNS = "" } - return nil } // TODO (5.0): return the statistics per network interface // This would allow better compat with docker. -func getContainerNetIO(ctr *Container) (*LinkStatistics64, error) { +func getContainerNetIO(ctr *Container) (map[string]define.ContainerNetworkStats, error) { if ctr.state.NetNS == "" { // If NetNS is nil, it was set as none, and no netNS // was set up this is a valid state and thus return no @@ -226,18 +205,27 @@ func getContainerNetIO(ctr *Container) (*LinkStatistics64, error) { return nil, nil } - cmd := exec.Command("jexec", ctr.state.NetNS, "netstat", "-bi", "--libxo", "json") + // First try running 'netstat -j' - this lets us retrieve stats from + // containers which don't have a separate vnet jail. + cmd := exec.Command("netstat", "-j", ctr.state.NetNS, "-bi", "--libxo", "json") out, err := cmd.Output() if err != nil { - return nil, err + // Fall back to using jexec so that this still works on 13.2 + // which does not have the -j flag. + cmd := exec.Command("jexec", ctr.state.NetNS, "netstat", "-bi", "--libxo", "json") + out, err = cmd.Output() + } + if err != nil { + return nil, fmt.Errorf("failed to read network stats: %v", err) } stats := Netstat{} if err := jdec.Unmarshal(out, &stats); err != nil { return nil, err } + res := make(map[string]define.ContainerNetworkStats) + // Sum all the interface stats - in practice only Tx/TxBytes are needed - res := &LinkStatistics64{} for _, ifaddr := range stats.Statistics.Interface { // Each interface has two records, one for link-layer which has // an MTU field and one for IP which doesn't. 
We only want the @@ -247,14 +235,16 @@ func getContainerNetIO(ctr *Container) (*LinkStatistics64, error) { // if we move to per-interface stats in future, this can be // reported separately. if ifaddr.Mtu > 0 { - res.RxPackets += ifaddr.ReceivedPackets - res.TxPackets += ifaddr.SentPackets - res.RxBytes += ifaddr.ReceivedBytes - res.TxBytes += ifaddr.SentBytes - res.RxErrors += ifaddr.ReceivedErrors - res.TxErrors += ifaddr.SentErrors - res.RxDropped += ifaddr.DroppedPackets - res.Collisions += ifaddr.Collisions + linkStats := define.ContainerNetworkStats{ + RxPackets: ifaddr.ReceivedPackets, + TxPackets: ifaddr.SentPackets, + RxBytes: ifaddr.ReceivedBytes, + TxBytes: ifaddr.SentBytes, + RxErrors: ifaddr.ReceivedErrors, + TxErrors: ifaddr.SentErrors, + RxDropped: ifaddr.DroppedPackets, + } + res[ifaddr.Name] = linkStats } } diff --git a/vendor/github.com/containers/podman/v5/libpod/networking_linux.go b/vendor/github.com/containers/podman/v5/libpod/networking_linux.go new file mode 100644 index 000000000..a8a057d99 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/libpod/networking_linux.go @@ -0,0 +1,312 @@ +//go:build !remote + +package libpod + +import ( + "crypto/rand" + "fmt" + "net" + "os" + "path/filepath" + + "github.com/containernetworking/plugins/pkg/ns" + "github.com/containers/common/libnetwork/types" + netUtil "github.com/containers/common/libnetwork/util" + "github.com/containers/common/pkg/netns" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" +) + +// Create and configure a new network namespace for a container +func (r *Runtime) configureNetNS(ctr *Container, ctrNS string) (status map[string]types.StatusBlock, rerr error) { + if err := r.exposeMachinePorts(ctr.config.PortMappings); err != nil { + return nil, err + } + defer func() { + // make sure to unexpose the gvproxy ports when an error happens + if rerr != nil { + if err := r.unexposeMachinePorts(ctr.config.PortMappings); err != nil { + logrus.Errorf("failed to free gvproxy machine ports: %v", err) + } + } + }() + if ctr.config.NetMode.IsSlirp4netns() { + return nil, r.setupSlirp4netns(ctr, ctrNS) + } + if ctr.config.NetMode.IsPasta() { + return nil, r.setupPasta(ctr, ctrNS) + } + networks, err := ctr.networks() + if err != nil { + return nil, err + } + // All networks have been removed from the container. + // This is effectively forcing net=none. + if len(networks) == 0 { + return nil, nil + } + + netOpts := ctr.getNetworkOptions(networks) + netStatus, err := r.setUpNetwork(ctrNS, netOpts) + if err != nil { + return nil, err + } + defer func() { + // do not forget to tear down the netns when a later error happened. 
+		if rerr != nil {
+			if err := r.teardownNetworkBackend(ctrNS, netOpts); err != nil {
+				logrus.Warnf("failed to teardown network after failed setup: %v", err)
+			}
+		}
+	}()
+
+	// set up the rootless port forwarder when running rootless with ports and the network status is empty;
+	// if this is called from network reload the network status will not be empty and we should
+	// not set up ports because they are still active
+	if rootless.IsRootless() && len(ctr.config.PortMappings) > 0 && ctr.getNetworkStatus() == nil {
+		// set up port forwarder for rootless netns
+		// TODO: support slirp4netns port forwarder as well
+		// make sure to fix this in container.handleRestartPolicy() as well
+		// Important: we have to call this after r.setUpNetwork() so that
+		// we can use the proper netStatus
+		err = r.setupRootlessPortMappingViaRLK(ctr, ctrNS, netStatus)
+	}
+	return netStatus, err
+}
+
+// Create and configure a new network namespace for a container
+func (r *Runtime) createNetNS(ctr *Container) (n string, q map[string]types.StatusBlock, retErr error) {
+	ctrNS, err := netns.NewNS()
+	if err != nil {
+		return "", nil, fmt.Errorf("creating network namespace for container %s: %w", ctr.ID(), err)
+	}
+	defer func() {
+		if retErr != nil {
+			if err := netns.UnmountNS(ctrNS.Path()); err != nil {
+				logrus.Errorf("Unmounting partially created network namespace for container %s: %v", ctr.ID(), err)
+			}
+			if err := ctrNS.Close(); err != nil {
+				logrus.Errorf("Closing partially created network namespace for container %s: %v", ctr.ID(), err)
+			}
+		}
+	}()
+
+	logrus.Debugf("Made network namespace at %s for container %s", ctrNS.Path(), ctr.ID())
+
+	var networkStatus map[string]types.StatusBlock
+	networkStatus, err = r.configureNetNS(ctr, ctrNS.Path())
+	return ctrNS.Path(), networkStatus, err
+}
+
+// Configure the network namespace using the container process
+func (r *Runtime) setupNetNS(ctr *Container) error {
+	nsProcess := fmt.Sprintf("/proc/%d/ns/net", ctr.state.PID)
+
+	b := make([]byte, 16)
+
+	if _, err := rand.Reader.Read(b); err != nil {
+		return fmt.Errorf("failed to generate random netns name: %w", err)
+	}
+	nsPath, err := netns.GetNSRunDir()
+	if err != nil {
+		return err
+	}
+	nsPath = filepath.Join(nsPath, fmt.Sprintf("netns-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]))
+
+	if err := os.MkdirAll(filepath.Dir(nsPath), 0711); err != nil {
+		return err
+	}
+
+	mountPointFd, err := os.Create(nsPath)
+	if err != nil {
+		return err
+	}
+	if err := mountPointFd.Close(); err != nil {
+		return err
+	}
+
+	if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil {
+		return fmt.Errorf("cannot mount %s: %w", nsPath, err)
+	}
+
+	networkStatus, err := r.configureNetNS(ctr, nsPath)
+
+	// Assign NetNS attributes to container
+	ctr.state.NetNS = nsPath
+	ctr.state.NetworkStatus = networkStatus
+	return err
+}
+
+// Tear down a network namespace, undoing all state associated with it.
+func (r *Runtime) teardownNetNS(ctr *Container) error {
+	if err := r.unexposeMachinePorts(ctr.config.PortMappings); err != nil {
+		// do not return an error otherwise we would prevent network cleanup
+		logrus.Errorf("failed to free gvproxy machine ports: %v", err)
+	}
+
+	// Do not check the error here, we want to always umount the netns
+	// This will ensure that the container interface will be deleted
+	// even when there is a CNI or netavark bug.
+ prevErr := r.teardownNetwork(ctr) + + // First unmount the namespace + if err := netns.UnmountNS(ctr.state.NetNS); err != nil { + if prevErr != nil { + logrus.Error(prevErr) + } + return fmt.Errorf("unmounting network namespace for container %s: %w", ctr.ID(), err) + } + + ctr.state.NetNS = "" + + return prevErr +} + +func getContainerNetNS(ctr *Container) (string, *Container, error) { + if ctr.state.NetNS != "" { + return ctr.state.NetNS, nil, nil + } + if ctr.config.NetNsCtr != "" { + c, err := ctr.runtime.GetContainer(ctr.config.NetNsCtr) + if err != nil { + return "", nil, err + } + if err = c.syncContainer(); err != nil { + return "", c, err + } + netNs, c2, err := getContainerNetNS(c) + if c2 != nil { + c = c2 + } + return netNs, c, err + } + return "", nil, nil +} + +// Returns a map of interface name to statistics for that interface. +func getContainerNetIO(ctr *Container) (map[string]define.ContainerNetworkStats, error) { + perNetworkStats := make(map[string]define.ContainerNetworkStats) + + netNSPath, _, netPathErr := getContainerNetNS(ctr) + if netPathErr != nil { + return nil, netPathErr + } + if netNSPath == "" { + // If netNSPath is empty, it was set as none, and no netNS was set up + // this is a valid state and thus return no error, nor any statistics + return nil, nil + } + + err := ns.WithNetNSPath(netNSPath, func(_ ns.NetNS) error { + links, err := netlink.LinkList() + if err != nil { + return fmt.Errorf("retrieving all network interfaces: %w", err) + } + for _, link := range links { + attributes := link.Attrs() + if attributes.Flags&net.FlagLoopback != 0 { + continue + } + + if attributes.Statistics != nil { + perNetworkStats[attributes.Name] = getNetStatsFromNetlinkStats(attributes.Statistics) + } + } + return nil + }) + return perNetworkStats, err +} + +func getNetStatsFromNetlinkStats(stats *netlink.LinkStatistics) define.ContainerNetworkStats { + return define.ContainerNetworkStats{ + RxBytes: stats.RxBytes, + RxDropped: stats.RxDropped, + RxErrors: stats.RxErrors, + RxPackets: stats.RxPackets, + TxBytes: stats.TxBytes, + TxDropped: stats.TxDropped, + TxErrors: stats.TxErrors, + TxPackets: stats.TxPackets, + } +} + +// joinedNetworkNSPath returns netns path and bool if netns was set +func (c *Container) joinedNetworkNSPath() (string, bool) { + for _, namespace := range c.config.Spec.Linux.Namespaces { + if namespace.Type == specs.NetworkNamespace { + return namespace.Path, true + } + } + return "", false +} + +func (c *Container) inspectJoinedNetworkNS(networkns string) (q types.StatusBlock, retErr error) { + var result types.StatusBlock + err := ns.WithNetNSPath(networkns, func(_ ns.NetNS) error { + ifaces, err := net.Interfaces() + if err != nil { + return err + } + routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL) + if err != nil { + return err + } + var gateway net.IP + for _, route := range routes { + // default gateway + if route.Dst == nil { + gateway = route.Gw + } + } + result.Interfaces = make(map[string]types.NetInterface) + for _, iface := range ifaces { + if iface.Flags&net.FlagLoopback != 0 { + continue + } + addrs, err := iface.Addrs() + if err != nil { + continue + } + if len(addrs) == 0 { + continue + } + subnets := make([]types.NetAddress, 0, len(addrs)) + for _, address := range addrs { + if ipnet, ok := address.(*net.IPNet); ok { + if ipnet.IP.IsLinkLocalMulticast() || ipnet.IP.IsLinkLocalUnicast() { + continue + } + subnet := types.NetAddress{ + IPNet: types.IPNet{ + IPNet: *ipnet, + }, + } + if ipnet.Contains(gateway) { + 
subnet.Gateway = gateway + } + subnets = append(subnets, subnet) + } + } + result.Interfaces[iface.Name] = types.NetInterface{ + Subnets: subnets, + MacAddress: types.HardwareAddr(iface.HardwareAddr), + } + } + return nil + }) + return result, err +} + +func getPastaIP(state *ContainerState) (net.IP, error) { + var ip string + err := ns.WithNetNSPath(state.NetNS, func(_ ns.NetNS) error { + // get the first ip in the netns + ip = netUtil.GetLocalIP() + return nil + }) + return net.ParseIP(ip), err +} diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_machine.go b/vendor/github.com/containers/podman/v5/libpod/networking_machine.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/networking_machine.go rename to vendor/github.com/containers/podman/v5/libpod/networking_machine.go index 5fb8986a3..7804b53d6 100644 --- a/vendor/github.com/containers/podman/v4/libpod/networking_machine.go +++ b/vendor/github.com/containers/podman/v5/libpod/networking_machine.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_pasta_linux.go b/vendor/github.com/containers/podman/v5/libpod/networking_pasta_linux.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/networking_pasta_linux.go rename to vendor/github.com/containers/podman/v5/libpod/networking_pasta_linux.go index 905810a0f..97e8118e7 100644 --- a/vendor/github.com/containers/podman/v4/libpod/networking_pasta_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/networking_pasta_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote // SPDX-License-Identifier: Apache-2.0 // diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_slirp4netns.go b/vendor/github.com/containers/podman/v5/libpod/networking_slirp4netns.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/networking_slirp4netns.go rename to vendor/github.com/containers/podman/v5/libpod/networking_slirp4netns.go index e8eaa7682..0de8e439f 100644 --- a/vendor/github.com/containers/podman/v4/libpod/networking_slirp4netns.go +++ b/vendor/github.com/containers/podman/v5/libpod/networking_slirp4netns.go @@ -1,5 +1,4 @@ //go:build !remote && linux -// +build !remote,linux package libpod @@ -12,7 +11,7 @@ import ( "github.com/containers/common/libnetwork/slirp4netns" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/errorhandling" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/oci.go b/vendor/github.com/containers/podman/v5/libpod/oci.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/oci.go rename to vendor/github.com/containers/podman/v5/libpod/oci.go index 8b55e58ad..a10056779 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -7,7 +6,7 @@ import ( "net/http" "github.com/containers/common/pkg/resize" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -150,6 +149,12 @@ type OCIRuntime interface { //nolint:interfacebloat // This is the path to that file for a given container. 
ExitFilePath(ctr *Container) (string, error) + // OOMFilePath is the path to a container's oom file if it was oom killed. + // An oom file is only created when the container is oom killed. The existence + // of this file means that the container was oom killed. + // This is the path to that file for a given container. + OOMFilePath(ctr *Container) (string, error) + // RuntimeInfo returns verbose information about the runtime. RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntimeInfo, error) @@ -202,6 +207,9 @@ type ExecOptions struct { // to 0, 1, 2) that will be passed to the executed process. The total FDs // passed will be 3 + PreserveFDs. PreserveFDs uint + // PreserveFD is a list of additional file descriptors (in addition + // to 0, 1, 2) that will be passed to the executed process. + PreserveFD []uint // DetachKeys is a set of keys that, when pressed in sequence, will // detach from the container. // If not provided, the default keys will be used. diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon.go similarity index 91% rename from vendor/github.com/containers/podman/v4/libpod/oci_conmon.go rename to vendor/github.com/containers/podman/v5/libpod/oci_conmon.go index a31df444a..c083246e3 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_common.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_attach_common.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_common.go rename to vendor/github.com/containers/podman/v5/libpod/oci_conmon_attach_common.go index 396e079dd..64fc0f41b 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_attach_common.go @@ -1,6 +1,4 @@ //go:build !remote && (linux || freebsd) -// +build !remote -// +build linux freebsd package libpod @@ -17,8 +15,8 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/detach" "github.com/containers/common/pkg/resize" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/errorhandling" "github.com/moby/term" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -35,7 +33,7 @@ const ( // Does not check if state is appropriate. // started is only required if startContainer is true. 
func (r *ConmonOCIRuntime) Attach(c *Container, params *AttachOptions) error { - passthrough := c.LogDriver() == define.PassthroughLogging + passthrough := c.LogDriver() == define.PassthroughLogging || c.LogDriver() == define.PassthroughTTYLogging if params == nil || params.Streams == nil { return fmt.Errorf("must provide parameters to Attach: %w", define.ErrInternal) diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_attach_freebsd.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/oci_conmon_attach_freebsd.go index 38ea56662..cd5ccf8d3 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_attach_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_linux.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_attach_linux.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_linux.go rename to vendor/github.com/containers/podman/v5/libpod/oci_conmon_attach_linux.go index 242cd4672..10435ee5e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_attach_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_common.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_common.go similarity index 92% rename from vendor/github.com/containers/podman/v4/libpod/oci_conmon_common.go rename to vendor/github.com/containers/podman/v5/libpod/oci_conmon_common.go index 9fc281049..c544f0f75 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_common.go @@ -1,6 +1,4 @@ //go:build !remote && (linux || freebsd) -// +build !remote -// +build linux freebsd package libpod @@ -28,14 +26,14 @@ import ( "github.com/containers/common/pkg/resize" "github.com/containers/common/pkg/version" conmonConfig "github.com/containers/conmon/runner/config" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/logs" - "github.com/containers/podman/v4/pkg/checkpoint/crutils" - "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/specgenutil" - "github.com/containers/podman/v4/pkg/util" - "github.com/containers/podman/v4/utils" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/logs" + "github.com/containers/podman/v5/pkg/checkpoint/crutils" + "github.com/containers/podman/v5/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/specgenutil" + "github.com/containers/podman/v5/pkg/util" + "github.com/containers/podman/v5/utils" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -66,6 +64,7 @@ type ConmonOCIRuntime struct { supportsKVM bool supportsNoCgroups bool enableKeyring bool + persistDir string } // Make a new Conmon-based OCI runtime with the given options. 
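The hunks that follow thread a conmon --persist-dir through the runtime: a per-container directory (the runtime's persist dir joined with the container ID) into which conmon writes its "exit" file and, if the container was OOM-killed, an "oom" file. As a rough illustration of what that layout enables (not part of this diff; the package and helper name are invented for the sketch), a consumer could detect an OOM kill by checking for the file:

package oomcheck

import (
	"errors"
	"io/fs"
	"os"
	"path/filepath"
)

// wasOOMKilled reports whether conmon created an "oom" file for the given
// container under the persist directory, which in this scheme means the
// container was OOM-killed.
func wasOOMKilled(persistDir, ctrID string) (bool, error) {
	_, err := os.Stat(filepath.Join(persistDir, ctrID, "oom"))
	switch {
	case err == nil:
		return true, nil
	case errors.Is(err, fs.ErrNotExist):
		return false, nil
	default:
		return false, err
	}
}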
@@ -145,13 +144,15 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime } runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits") + // The persist-dir is where conmon writes the exit file and oom file (if oom killed), we join the container ID to this path later on + runtime.persistDir = filepath.Join(runtime.tmpDir, "persist") // Create the exit files and attach sockets directories if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil { - // The directory is allowed to exist - if !os.IsExist(err) { - return nil, fmt.Errorf("creating OCI runtime exit files directory: %w", err) - } + return nil, fmt.Errorf("creating OCI runtime exit files directory: %w", err) + } + if err := os.MkdirAll(runtime.persistDir, 0750); err != nil { + return nil, fmt.Errorf("creating OCI runtime persist directory: %w", err) } return runtime, nil } @@ -209,7 +210,7 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta // status, but will instead only check for the existence of the conmon exit file // and update state to stopped if it exists. func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := util.GetRootlessRuntimeDir() if err != nil { return err } @@ -290,7 +291,7 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error { // Sets time the container was started, but does not save it. func (r *ConmonOCIRuntime) StartContainer(ctr *Container) error { // TODO: streams should probably *not* be our STDIN/OUT/ERR - redirect to buffers? - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := util.GetRootlessRuntimeDir() if err != nil { return err } @@ -309,7 +310,7 @@ func (r *ConmonOCIRuntime) StartContainer(ctr *Container) error { // UpdateContainer updates the given container's cgroup configuration func (r *ConmonOCIRuntime) UpdateContainer(ctr *Container, resources *spec.LinuxResources) error { - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := util.GetRootlessRuntimeDir() if err != nil { return err } @@ -368,7 +369,7 @@ func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool) // *bytes.buffer and returned; otherwise, it is set to os.Stderr. func (r *ConmonOCIRuntime) killContainer(ctr *Container, signal uint, all, captureStderr bool) (*bytes.Buffer, error) { logrus.Debugf("Sending signal %d to container %s", signal, ctr.ID()) - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := util.GetRootlessRuntimeDir() if err != nil { return nil, err } @@ -504,7 +505,7 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) // DeleteContainer deletes a container from the OCI runtime. func (r *ConmonOCIRuntime) DeleteContainer(ctr *Container) error { - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := util.GetRootlessRuntimeDir() if err != nil { return err } @@ -514,7 +515,7 @@ func (r *ConmonOCIRuntime) DeleteContainer(ctr *Container) error { // PauseContainer pauses the given container. func (r *ConmonOCIRuntime) PauseContainer(ctr *Container) error { - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := util.GetRootlessRuntimeDir() if err != nil { return err } @@ -524,7 +525,7 @@ func (r *ConmonOCIRuntime) PauseContainer(ctr *Container) error { // UnpauseContainer unpauses the given container. 
func (r *ConmonOCIRuntime) UnpauseContainer(ctr *Container) error { - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := util.GetRootlessRuntimeDir() if err != nil { return err } @@ -851,7 +852,7 @@ func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options Container args = append(args, ctr.ID()) logrus.Debugf("the args to checkpoint: %s %s", r.path, strings.Join(args, " ")) - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := util.GetRootlessRuntimeDir() if err != nil { return 0, err } @@ -942,6 +943,12 @@ func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) { return filepath.Join(r.exitsDir, ctr.ID()), nil } +// OOMFilePath is the path to a container's oom file. +// The oom file will only exist if the container was oom killed. +func (r *ConmonOCIRuntime) OOMFilePath(ctr *Container) (string, error) { + return filepath.Join(r.persistDir, ctr.ID(), "oom"), nil +} + // RuntimeInfo provides information on the runtime. func (r *ConmonOCIRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntimeInfo, error) { runtimePackage := version.Package(r.path) @@ -1038,6 +1045,39 @@ func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) { return b.String(), nil } +func getPreserveFdExtraFiles(preserveFD []uint, preserveFDs uint) (uint, []*os.File, []*os.File, error) { + var filesToClose []*os.File + var extraFiles []*os.File + + preserveFDsMap := make(map[uint]struct{}) + for _, i := range preserveFD { + if i < 3 { + return 0, nil, nil, fmt.Errorf("cannot preserve FD %d, consider using the passthrough log-driver to pass STDIO streams into the container: %w", i, define.ErrInvalidArg) + } + if i-2 > preserveFDs { + // preserveFDs is the number of FDs above 2 to keep around. + // e.g. if the user specified FD=3, then preserveFDs must be 1. 
+ preserveFDs = i - 2 + } + preserveFDsMap[i] = struct{}{} + } + + if preserveFDs > 0 { + for fd := 3; fd < int(3+preserveFDs); fd++ { + if len(preserveFDsMap) > 0 { + if _, ok := preserveFDsMap[uint(fd)]; !ok { + extraFiles = append(extraFiles, nil) + continue + } + } + f := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)) + filesToClose = append(filesToClose, f) + extraFiles = append(extraFiles, f) + } + } + return preserveFDs, filesToClose, extraFiles, nil +} + // createOCIContainer generates this container's main conmon instance and prepares it for starting func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *ContainerCheckpointOptions) (int64, error) { var stderrBuf bytes.Buffer @@ -1066,7 +1106,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co } if ctr.config.CgroupsMode == cgroupSplit { - if err := utils.MoveUnderCgroupSubtree("runtime"); err != nil { + if err := moveToRuntimeCgroup(); err != nil { return 0, err } } @@ -1076,7 +1116,11 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co pidfile = filepath.Join(ctr.state.RunDir, "pidfile") } - args := r.sharedConmonArgs(ctr, ctr.ID(), ctr.bundlePath(), pidfile, ctr.LogPath(), r.exitsDir, ociLog, ctr.LogDriver(), logTag) + persistDir := filepath.Join(r.persistDir, ctr.ID()) + args, err := r.sharedConmonArgs(ctr, ctr.ID(), ctr.bundlePath(), pidfile, ctr.LogPath(), r.exitsDir, persistDir, ociLog, ctr.LogDriver(), logTag) + if err != nil { + return 0, err + } if ctr.config.SdNotifyMode == define.SdNotifyModeContainer && ctr.config.SdNotifySocket != "" { args = append(args, fmt.Sprintf("--sdnotify-socket=%s", ctr.config.SdNotifySocket)) @@ -1114,10 +1158,11 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co args = append(args, []string{"--exit-command-arg", arg}...) } - // Pass down the LISTEN_* environment (see #10443). preserveFDs := ctr.config.PreserveFDs + + // Pass down the LISTEN_* environment (see #10443). if val := os.Getenv("LISTEN_FDS"); val != "" { - if ctr.config.PreserveFDs > 0 { + if preserveFDs > 0 || len(ctr.config.PreserveFD) > 0 { logrus.Warnf("Ignoring LISTEN_FDS to preserve custom user-specified FDs") } else { fds, err := strconv.Atoi(val) @@ -1128,6 +1173,10 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co } } + preserveFDs, filesToClose, extraFiles, err := getPreserveFdExtraFiles(ctr.config.PreserveFD, preserveFDs) + if err != nil { + return 0, err + } if preserveFDs > 0 { args = append(args, formatRuntimeOpts("--preserve-fds", strconv.FormatUint(uint64(preserveFDs), 10))...) 
 	}
@@ -1189,14 +1238,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
 		return 0, fmt.Errorf("configuring conmon env: %w", err)
 	}
 
-	var filesToClose []*os.File
-	if preserveFDs > 0 {
-		for fd := 3; fd < int(3+preserveFDs); fd++ {
-			f := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd))
-			filesToClose = append(filesToClose, f)
-			cmd.ExtraFiles = append(cmd.ExtraFiles, f)
-		}
-	}
+	cmd.ExtraFiles = extraFiles
 
 	cmd.Env = r.conmonEnv
 	// we don't want to step on users fds they asked to preserve
@@ -1324,7 +1366,7 @@ func (r *ConmonOCIRuntime) configureConmonEnv() ([]string, error) {
 		}
 		res = append(res, v)
 	}
-	runtimeDir, err := util.GetRuntimeDir()
+	runtimeDir, err := util.GetRootlessRuntimeDir()
 	if err != nil {
 		return nil, err
 	}
@@ -1334,7 +1376,15 @@
 }
 
 // sharedConmonArgs takes common arguments for exec and create/restore and formats them for the conmon CLI
-func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, ociLogPath, logDriver, logTag string) []string {
+func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, pidPath, logPath, exitDir, persistDir, ociLogPath, logDriver, logTag string) ([]string, error) {
+	// Make the persist directory for the container; the ctr ID is appended to it in the caller.
+	// This is needed as conmon writes the exit and oom files in the given persist directory path as just "exit" and "oom",
+	// so creating a directory with the container ID under the persist dir will help keep track of which container the
+	// exit and oom files belong to.
+	if err := os.MkdirAll(persistDir, 0750); err != nil {
+		return nil, fmt.Errorf("creating OCI runtime oom files directory for ctr %q: %w", ctr.ID(), err)
+	}
+
 	// set the conmon API version to be able to use the correct sync struct keys
 	args := []string{
 		"--api-version", "1",
@@ -1345,6 +1395,7 @@ func (r *ConmonOCIRuntime) sharedConmonArgs(ctr *Container, cuuid, bundlePath, p
 		"-p", pidPath,
 		"-n", ctr.Name(),
 		"--exit-dir", exitDir,
+		"--persist-dir", persistDir,
 		"--full-attach",
 	}
 	if len(r.runtimeFlags) > 0 {
@@ -1365,7 +1416,7 @@
 		logDriverArg = define.JournaldLogging
 	case define.NoLogging:
 		logDriverArg = define.NoLogging
-	case define.PassthroughLogging:
+	case define.PassthroughLogging, define.PassthroughTTYLogging:
 		logDriverArg = define.PassthroughLogging
 	//lint:ignore ST1015 the default case has to be here
 	default: //nolint:gocritic
@@ -1407,7 +1458,7 @@
 		logrus.Debugf("Running with no Cgroups")
 		args = append(args, "--runtime-arg", "--cgroup-manager", "--runtime-arg", "disabled")
 	}
-	return args
+	return args, nil
 }
 
 // newPipe creates a unix socket pair for communication.
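A note on the FD arithmetic introduced above: conmon's --preserve-fds is a count of FDs above stdio (per the comment in getPreserveFdExtraFiles, "the number of FDs above 2 to keep around"), so when specific FDs are requested via PreserveFD the count must grow to cover the highest one, and unrequested slots in cmd.ExtraFiles are left nil so the corresponding host FDs are not leaked. A simplified, self-contained sketch of that mapping rule (illustrative only; fdSlots is an invented name, and the real code rejects FDs 0-2 with an error rather than skipping them):

package main

import "fmt"

// fdSlots mirrors the counting rule used by getPreserveFdExtraFiles: it
// returns the --preserve-fds count conmon needs and, per ExtraFiles slot
// (starting at FD 3), whether a real file (true) or a nil placeholder
// (false) would be passed.
func fdSlots(preserveFD []uint, preserveFDs uint) (uint, []bool) {
	requested := make(map[uint]struct{})
	for _, i := range preserveFD {
		if i < 3 {
			continue // stdio FD; the real implementation returns an error here
		}
		if i-2 > preserveFDs {
			// FD i occupies ExtraFiles slot i-3, so the count must reach i-2.
			preserveFDs = i - 2
		}
		requested[i] = struct{}{}
	}
	slots := make([]bool, 0, preserveFDs)
	for fd := uint(3); fd < 3+preserveFDs; fd++ {
		_, ok := requested[fd]
		// With no specific FDs requested, every slot up to the count is real.
		slots = append(slots, len(requested) == 0 || ok)
	}
	return preserveFDs, slots
}

func main() {
	// --preserve-fd 3 --preserve-fd 5: conmon is told --preserve-fds 3 and
	// ExtraFiles becomes [fd3, nil, fd5]; the nil keeps host FD 4 private.
	fmt.Println(fdSlots([]uint{3, 5}, 0)) // 3 [true false true]
}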
diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_common.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_exec_common.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_common.go rename to vendor/github.com/containers/podman/v5/libpod/oci_conmon_exec_common.go index b44dbe0ef..ca917ff1a 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_exec_common.go @@ -1,6 +1,4 @@ //go:build !remote && (linux || freebsd) -// +build !remote -// +build linux freebsd package libpod @@ -19,9 +17,9 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/detach" "github.com/containers/common/pkg/resize" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/containers/podman/v4/pkg/lookup" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/lookup" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -389,10 +387,18 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex } defer processFile.Close() - args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, define.NoLogging, c.config.LogTag) + args, err := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), c.execPersistDir(sessionID), ociLog, define.NoLogging, c.config.LogTag) + if err != nil { + return nil, nil, err + } - if options.PreserveFDs > 0 { - args = append(args, formatRuntimeOpts("--preserve-fds", strconv.FormatUint(uint64(options.PreserveFDs), 10))...) + preserveFDs, filesToClose, extraFiles, err := getPreserveFdExtraFiles(options.PreserveFD, options.PreserveFDs) + if err != nil { + return nil, nil, err + } + + if preserveFDs > 0 { + args = append(args, formatRuntimeOpts("--preserve-fds", strconv.FormatUint(uint64(preserveFDs), 10))...) } if options.Terminal { @@ -442,19 +448,12 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex return nil, nil, fmt.Errorf("configuring conmon env: %w", err) } - var filesToClose []*os.File - if options.PreserveFDs > 0 { - for fd := 3; fd < int(3+options.PreserveFDs); fd++ { - f := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)) - filesToClose = append(filesToClose, f) - execCmd.ExtraFiles = append(execCmd.ExtraFiles, f) - } - } + execCmd.ExtraFiles = extraFiles // we don't want to step on users fds they asked to preserve // Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3 execCmd.Env = r.conmonEnv - execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5)) + execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", preserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", preserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", preserveFDs+5)) execCmd.Env = append(execCmd.Env, conmonEnv...) 
execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe) diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_exec_freebsd.go similarity index 78% rename from vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/oci_conmon_exec_freebsd.go index baf3ac4e4..d8bdf2177 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_exec_freebsd.go @@ -1,10 +1,9 @@ //go:build !remote -// +build !remote package libpod import ( - "github.com/opencontainers/runc/libcontainer/user" + "github.com/moby/sys/user" spec "github.com/opencontainers/runtime-spec/specs-go" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_linux.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_exec_linux.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_linux.go rename to vendor/github.com/containers/podman/v5/libpod/oci_conmon_exec_linux.go index 8ef0d996c..e0d2f5842 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_exec_linux.go @@ -1,11 +1,10 @@ //go:build !remote -// +build !remote package libpod import ( "github.com/containers/common/pkg/capabilities" - "github.com/opencontainers/runc/libcontainer/user" + "github.com/moby/sys/user" spec "github.com/opencontainers/runtime-spec/specs-go" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_freebsd.go similarity index 89% rename from vendor/github.com/containers/podman/v4/libpod/oci_conmon_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/oci_conmon_freebsd.go index e8cf6085a..5f113f5cb 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -28,3 +27,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec } return nil } + +func moveToRuntimeCgroup() error { + return errors.New("moveToRuntimeCgroup not supported on freebsd") +} diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_linux.go b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_linux.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/oci_conmon_linux.go rename to vendor/github.com/containers/podman/v5/libpod/oci_conmon_linux.go index d68a30333..e62489400 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_conmon_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -16,9 +15,9 @@ import ( "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/utils" + "github.com/containers/common/pkg/systemd" + "github.com/containers/podman/v5/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/rootless" pmount "github.com/containers/storage/pkg/mount" spec "github.com/opencontainers/runtime-spec/specs-go" 
"github.com/opencontainers/selinux/go-selinux/label" @@ -149,7 +148,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec } logrus.Infof("Running conmon under slice %s and unitName %s", realCgroupParent, unitName) - if err := utils.RunUnderSystemdScope(cmd.Process.Pid, realCgroupParent, unitName); err != nil { + if err := systemd.RunUnderSystemdScope(cmd.Process.Pid, realCgroupParent, unitName); err != nil { logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to systemd sandbox cgroup: %v", err) } } else { @@ -326,3 +325,7 @@ func GetLimits(resource *spec.LinuxResources) (runcconfig.Resources, error) { final.Unified = resource.Unified return *final, nil } + +func moveToRuntimeCgroup() error { + return cgroups.MoveUnderCgroupSubtree("runtime") +} diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_missing.go b/vendor/github.com/containers/podman/v5/libpod/oci_missing.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/oci_missing.go rename to vendor/github.com/containers/podman/v5/libpod/oci_missing.go index eedb5c6c6..0ac6417f5 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_missing.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_missing.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -10,7 +9,7 @@ import ( "sync" "github.com/containers/common/pkg/resize" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -30,6 +29,8 @@ type MissingRuntime struct { name string // exitsDir is the directory for exit files. exitsDir string + // persistDir is the directory for exit and oom files. + persistDir string } // Get a new MissingRuntime for the given name. @@ -53,6 +54,7 @@ func getMissingRuntime(name string, r *Runtime) OCIRuntime { newRuntime := new(MissingRuntime) newRuntime.name = name newRuntime.exitsDir = filepath.Join(r.config.Engine.TmpDir, "exits") + newRuntime.persistDir = filepath.Join(r.config.Engine.TmpDir, "persist") missingRuntimes[name] = newRuntime @@ -223,6 +225,12 @@ func (r *MissingRuntime) ExitFilePath(ctr *Container) (string, error) { return filepath.Join(r.exitsDir, ctr.ID()), nil } +// OOMFilePath returns the oom file path for a container. +// The oom file will only exist if the container was oom killed. 
+func (r *MissingRuntime) OOMFilePath(ctr *Container) (string, error) { + return filepath.Join(r.persistDir, ctr.ID(), "oom"), nil +} + // RuntimeInfo returns information on the missing runtime func (r *MissingRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntimeInfo, error) { ocirt := define.OCIRuntimeInfo{ diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_util.go b/vendor/github.com/containers/podman/v5/libpod/oci_util.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/oci_util.go rename to vendor/github.com/containers/podman/v5/libpod/oci_util.go index 981174561..31d6b7fbe 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_util.go +++ b/vendor/github.com/containers/podman/v5/libpod/oci_util.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -12,7 +11,7 @@ import ( "time" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/options.go b/vendor/github.com/containers/podman/v5/libpod/options.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/options.go rename to vendor/github.com/containers/podman/v5/libpod/options.go index 4bd96f46c..5a6b59930 100644 --- a/vendor/github.com/containers/podman/v4/libpod/options.go +++ b/vendor/github.com/containers/podman/v5/libpod/options.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -18,12 +17,11 @@ import ( "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/namespaces" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/namespaces" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/regexp" @@ -89,7 +87,7 @@ func WithStorageConfig(config storage.StoreOptions) RuntimeOption { // or graphdriveroptions are set, then GraphRoot and RunRoot // must be set if setField { - storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) + storeOpts, err := storage.DefaultStoreOptions() if err != nil { return err } @@ -366,15 +364,6 @@ func WithTmpDir(dir string) RuntimeOption { } } -// WithNoStore sets a bool on the runtime that we do not need -// any containers storage. -func WithNoStore() RuntimeOption { - return func(rt *Runtime) error { - rt.noStore = true - return nil - } -} - // WithNoPivotRoot sets the runtime to use MS_MOVE instead of PIVOT_ROOT when // starting containers. 
func WithNoPivotRoot() RuntimeOption { @@ -1112,7 +1101,7 @@ func WithLogDriver(driver string) CtrCreateOption { switch driver { case "": return fmt.Errorf("log driver must be set: %w", define.ErrInvalidArg) - case define.JournaldLogging, define.KubernetesLogging, define.JSONLogging, define.NoLogging, define.PassthroughLogging: + case define.JournaldLogging, define.KubernetesLogging, define.JSONLogging, define.NoLogging, define.PassthroughLogging, define.PassthroughTTYLogging: break default: return fmt.Errorf("invalid log driver: %w", define.ErrInvalidArg) @@ -1527,6 +1516,18 @@ func WithPreserveFDs(fd uint) CtrCreateOption { } } +// WithPreserveFD forwards from the process running Libpod into the container +// the given list of extra FDs to the created container +func WithPreserveFD(fds []uint) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + ctr.config.PreserveFD = fds + return nil + } +} + // WithCreateCommand adds the full command plus arguments of the current // process to the container config. func WithCreateCommand(cmd []string) CtrCreateOption { @@ -2333,6 +2334,19 @@ func WithGroupEntry(groupEntry string) CtrCreateOption { } } +// WithBaseHostsFile sets the option to copy /etc/hosts file. +func WithBaseHostsFile(baseHostsFile string) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + + ctr.config.BaseHostsFile = baseHostsFile + + return nil + } +} + // WithMountAllDevices sets the option to mount all of a privileged container's // host devices func WithMountAllDevices() CtrCreateOption { diff --git a/vendor/github.com/containers/podman/v4/libpod/plugin/volume_api.go b/vendor/github.com/containers/podman/v5/libpod/plugin/volume_api.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/plugin/volume_api.go rename to vendor/github.com/containers/podman/v5/libpod/plugin/volume_api.go index 2c2016685..bc9f675f4 100644 --- a/vendor/github.com/containers/podman/v4/libpod/plugin/volume_api.go +++ b/vendor/github.com/containers/podman/v5/libpod/plugin/volume_api.go @@ -15,7 +15,7 @@ import ( "time" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/docker/go-plugins-helpers/sdk" "github.com/docker/go-plugins-helpers/volume" jsoniter "github.com/json-iterator/go" diff --git a/vendor/github.com/containers/podman/v4/libpod/pod.go b/vendor/github.com/containers/podman/v5/libpod/pod.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/pod.go rename to vendor/github.com/containers/podman/v5/libpod/pod.go index c83dba75a..605749241 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod.go +++ b/vendor/github.com/containers/podman/v5/libpod/pod.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -11,8 +10,8 @@ import ( "time" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/lock" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/lock" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -278,8 +277,8 @@ func (p *Pod) VolumesFrom() []string { if err != nil { return nil } - if ctrs, ok := infra.config.Spec.Annotations[define.InspectAnnotationVolumesFrom]; ok { - return strings.Split(ctrs, ",") + if ctrs, ok := 
infra.config.Spec.Annotations[define.VolumesFromAnnotation]; ok { + return strings.Split(ctrs, ";") } return nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/pod_api.go b/vendor/github.com/containers/podman/v5/libpod/pod_api.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/pod_api.go rename to vendor/github.com/containers/podman/v5/libpod/pod_api.go index c5bab8c2d..875d9ef3d 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod_api.go +++ b/vendor/github.com/containers/podman/v5/libpod/pod_api.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -9,10 +8,10 @@ import ( "fmt" "github.com/containers/common/pkg/cgroups" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/parallel" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/parallel" + "github.com/containers/podman/v5/pkg/rootless" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/pod_internal.go b/vendor/github.com/containers/podman/v5/libpod/pod_internal.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/pod_internal.go rename to vendor/github.com/containers/podman/v5/libpod/pod_internal.go index 7554705d2..8af861564 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod_internal.go +++ b/vendor/github.com/containers/podman/v5/libpod/pod_internal.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -7,7 +6,7 @@ import ( "fmt" "time" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/containers/storage/pkg/stringid" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/pod_internal_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/pod_internal_freebsd.go similarity index 83% rename from vendor/github.com/containers/podman/v4/libpod/pod_internal_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/pod_internal_freebsd.go index d8c1111d9..f510ef37b 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod_internal_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/pod_internal_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/pod_internal_linux.go b/vendor/github.com/containers/podman/v5/libpod/pod_internal_linux.go similarity index 89% rename from vendor/github.com/containers/podman/v4/libpod/pod_internal_linux.go rename to vendor/github.com/containers/podman/v5/libpod/pod_internal_linux.go index e8710d5b1..873e2c24a 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod_internal_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/pod_internal_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -8,8 +7,8 @@ import ( "path/filepath" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/pod_status.go b/vendor/github.com/containers/podman/v5/libpod/pod_status.go similarity index 96% 
rename from vendor/github.com/containers/podman/v4/libpod/pod_status.go rename to vendor/github.com/containers/podman/v5/libpod/pod_status.go index 1b4767b25..186709a29 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod_status.go +++ b/vendor/github.com/containers/podman/v5/libpod/pod_status.go @@ -1,9 +1,8 @@ //go:build !remote -// +build !remote package libpod -import "github.com/containers/podman/v4/libpod/define" +import "github.com/containers/podman/v5/libpod/define" // GetPodStatus determines the status of the pod based on the // statuses of the containers in the pod. diff --git a/vendor/github.com/containers/podman/v4/libpod/pod_top_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/pod_top_freebsd.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/pod_top_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/pod_top_freebsd.go index 902e369a7..017594410 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod_top_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/pod_top_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -7,7 +6,7 @@ import ( "fmt" "strings" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" ) // GetPodPidInformation returns process-related data of all processes in diff --git a/vendor/github.com/containers/podman/v4/libpod/pod_top_linux.go b/vendor/github.com/containers/podman/v5/libpod/pod_top_linux.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/pod_top_linux.go rename to vendor/github.com/containers/podman/v5/libpod/pod_top_linux.go index a80049418..c7f0c9ce3 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod_top_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/pod_top_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -7,8 +6,8 @@ import ( "strconv" "strings" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/psgo" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/reset.go b/vendor/github.com/containers/podman/v5/libpod/reset.go similarity index 93% rename from vendor/github.com/containers/podman/v4/libpod/reset.go rename to vendor/github.com/containers/podman/v5/libpod/reset.go index de06e5784..955fcdff2 100644 --- a/vendor/github.com/containers/podman/v4/libpod/reset.go +++ b/vendor/github.com/containers/podman/v5/libpod/reset.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -12,10 +11,9 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/lockfile" stypes "github.com/containers/storage/types" @@ -110,13 +108,13 @@ func (r *Runtime) Reset(ctx context.Context) error { return define.ErrRuntimeStopped } - var timeout *uint + var timeout uint = 0 pods, err := r.GetAllPods() if err != nil { return err } for _, p := range pods { - if ctrs, err := r.RemovePod(ctx, 
p, true, true, timeout); err != nil { + if ctrs, err := r.RemovePod(ctx, p, true, true, &timeout); err != nil { if errors.Is(err, define.ErrNoSuchPod) { continue } @@ -135,7 +133,7 @@ func (r *Runtime) Reset(ctx context.Context) error { } for _, c := range ctrs { - if ctrs, _, err := r.RemoveContainerAndDependencies(ctx, c, true, true, timeout); err != nil { + if ctrs, _, err := r.RemoveContainerAndDependencies(ctx, c, true, true, &timeout); err != nil { for ctr, err := range ctrs { logrus.Errorf("Error removing container %s: %v", ctr, err) } @@ -165,7 +163,7 @@ func (r *Runtime) Reset(ctx context.Context) error { return err } for _, v := range volumes { - if err := r.RemoveVolume(ctx, v, true, timeout); err != nil { + if err := r.RemoveVolume(ctx, v, true, &timeout); err != nil { if errors.Is(err, define.ErrNoSuchVolume) { continue } @@ -238,7 +236,7 @@ func (r *Runtime) Reset(ctx context.Context) error { prevError = err } } - runtimeDir, err := util.GetRuntimeDir() + runtimeDir, err := util.GetRootlessRuntimeDir() if err != nil { return err } @@ -259,7 +257,7 @@ func (r *Runtime) Reset(ctx context.Context) error { prevError = err } } - if storageConfPath, err := storage.DefaultConfigFile(rootless.IsRootless()); err == nil { + if storageConfPath, err := storage.DefaultConfigFile(); err == nil { switch storageConfPath { case stypes.SystemConfigFile: break diff --git a/vendor/github.com/containers/podman/v4/libpod/rlimit_int64.go b/vendor/github.com/containers/podman/v5/libpod/rlimit_int64.go similarity index 71% rename from vendor/github.com/containers/podman/v4/libpod/rlimit_int64.go rename to vendor/github.com/containers/podman/v5/libpod/rlimit_int64.go index 23dbc5066..ba651d078 100644 --- a/vendor/github.com/containers/podman/v4/libpod/rlimit_int64.go +++ b/vendor/github.com/containers/podman/v5/libpod/rlimit_int64.go @@ -1,5 +1,4 @@ //go:build !remote && freebsd -// +build !remote,freebsd package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/rlimit_uint64.go b/vendor/github.com/containers/podman/v5/libpod/rlimit_uint64.go similarity index 72% rename from vendor/github.com/containers/podman/v4/libpod/rlimit_uint64.go rename to vendor/github.com/containers/podman/v5/libpod/rlimit_uint64.go index d446b4651..585cec214 100644 --- a/vendor/github.com/containers/podman/v4/libpod/rlimit_uint64.go +++ b/vendor/github.com/containers/podman/v5/libpod/rlimit_uint64.go @@ -1,5 +1,4 @@ //go:build !remote && linux -// +build !remote,linux package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime.go b/vendor/github.com/containers/podman/v5/libpod/runtime.go similarity index 92% rename from vendor/github.com/containers/podman/v4/libpod/runtime.go rename to vendor/github.com/containers/podman/v5/libpod/runtime.go index 986e40f60..3ef535a73 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -25,18 +24,18 @@ import ( "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/secrets" + systemdCommon "github.com/containers/common/pkg/systemd" "github.com/containers/image/v5/pkg/sysregistriesv2" is "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/libpod/lock" - 
"github.com/containers/podman/v4/libpod/plugin" - "github.com/containers/podman/v4/libpod/shutdown" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/systemd" - "github.com/containers/podman/v4/pkg/util" - "github.com/containers/podman/v4/utils" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/libpod/lock" + "github.com/containers/podman/v5/libpod/plugin" + "github.com/containers/podman/v5/libpod/shutdown" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/systemd" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/unshare" @@ -114,8 +113,6 @@ type Runtime struct { // mechanism to read and write even logs eventer events.Eventer - // noStore indicates whether we need to interact with a store or not - noStore bool // secretsManager manages secrets secretsManager *secrets.SecretsManager } @@ -133,7 +130,7 @@ func SetXdgDirs() error { if runtimeDir == "" { var err error - runtimeDir, err = util.GetRuntimeDir() + runtimeDir, err = util.GetRootlessRuntimeDir() if err != nil { return err } @@ -169,7 +166,7 @@ func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) if err != nil { return nil, err } - return newRuntimeFromConfig(conf, options...) + return newRuntimeFromConfig(ctx, conf, options...) } // NewRuntimeFromConfig creates a new container runtime using the given @@ -178,10 +175,10 @@ func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) // An error will be returned if the configuration file at the given path does // not exist or cannot be loaded func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (*Runtime, error) { - return newRuntimeFromConfig(userConfig, options...) + return newRuntimeFromConfig(ctx, userConfig, options...) 
} -func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runtime, error) { +func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (*Runtime, error) { runtime := new(Runtime) if conf.Engine.OCIRuntime == "" { @@ -198,7 +195,7 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti return nil, err } - storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) + storeOpts, err := storage.DefaultStoreOptions() if err != nil { return nil, err } @@ -226,7 +223,7 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti return nil, fmt.Errorf("starting shutdown signal handler: %w", err) } - if err := makeRuntime(runtime); err != nil { + if err := makeRuntime(ctx, runtime); err != nil { return nil, err } @@ -336,7 +333,7 @@ func getDBState(runtime *Runtime) (State, error) { // Make a new runtime based on the given configuration // Sets up containers/storage, state store, OCI runtime -func makeRuntime(runtime *Runtime) (retErr error) { +func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) { // Find a working conmon binary cPath, err := runtime.config.FindConmon() if err != nil { @@ -344,13 +341,6 @@ func makeRuntime(runtime *Runtime) (retErr error) { } runtime.conmonPath = cPath - if runtime.noStore && runtime.doReset { - return fmt.Errorf("cannot perform system reset if runtime is not creating a store: %w", define.ErrInvalidArg) - } - if runtime.doReset && runtime.doRenumber { - return fmt.Errorf("cannot perform system reset while renumbering locks: %w", define.ErrInvalidArg) - } - if runtime.config.Engine.StaticDir == "" { runtime.config.Engine.StaticDir = filepath.Join(runtime.storageConfig.GraphRoot, "libpod") runtime.storageSet.StaticDirSet = true @@ -403,6 +393,15 @@ func makeRuntime(runtime *Runtime) (retErr error) { runtime.mergeDBConfig(dbConfig) unified, _ := cgroups.IsCgroup2UnifiedMode() + // DELETE ON RHEL9 + if !unified { + _, ok := os.LookupEnv("PODMAN_IGNORE_CGROUPSV1_WARNING") + if !ok { + logrus.Warn("Using cgroups-v1 which is deprecated in favor of cgroups-v2 with Podman v5 and will be removed in a future version. Set environment variable `PODMAN_IGNORE_CGROUPSV1_WARNING` to hide this warning.") + } + } + // DELETE ON RHEL9 + if unified && rootless.IsRootless() && !systemd.IsSystemdSessionValid(rootless.GetRootlessUID()) { // If user is rootless and XDG_RUNTIME_DIR is found, podman will not proceed with /tmp directory // it will try to use existing XDG_RUNTIME_DIR @@ -456,8 +455,6 @@ func makeRuntime(runtime *Runtime) (retErr error) { var store storage.Store if needsUserns { logrus.Debug("Not configuring container store") - } else if runtime.noStore { - logrus.Debug("No store required. Not opening container store.") } else if err := runtime.configureStore(); err != nil { // Make a best-effort attempt to clean up if performing a // storage reset. @@ -608,7 +605,7 @@ func makeRuntime(runtime *Runtime) (retErr error) { if became { // Check if the pause process was created. If it was created, then // move it to its own systemd scope. 
- utils.MovePauseProcessToScope(pausePid) + systemdCommon.MovePauseProcessToScope(pausePid) // gocritic complains because defer is not run on os.Exit() // However this is fine because the lock is released anyway when the process exits @@ -632,6 +629,13 @@ func makeRuntime(runtime *Runtime) (retErr error) { return err } + // Mark the runtime as valid - ready to be used, cannot be modified + // further. + // Need to do this *before* refresh as we can remove containers there. + // Should not be a big deal as we don't return it to users until after + // refresh runs. + runtime.valid = true + // If we need to refresh the state, do it now - things are guaranteed to // be set up by now. if doRefresh { @@ -642,17 +646,13 @@ func makeRuntime(runtime *Runtime) (retErr error) { } } - if err2 := runtime.refresh(runtimeAliveFile); err2 != nil { + if err2 := runtime.refresh(ctx, runtimeAliveFile); err2 != nil { return err2 } } runtime.startWorker() - // Mark the runtime as valid - ready to be used, cannot be modified - // further - runtime.valid = true - return nil } @@ -696,15 +696,16 @@ func (r *Runtime) GetConfig() (*config.Config, error) { // libimageEventsMap translates a libimage event type to a libpod event status. var libimageEventsMap = map[libimage.EventType]events.Status{ - libimage.EventTypeImagePull: events.Pull, - libimage.EventTypeImagePush: events.Push, - libimage.EventTypeImageRemove: events.Remove, - libimage.EventTypeImageLoad: events.LoadFromArchive, - libimage.EventTypeImageSave: events.Save, - libimage.EventTypeImageTag: events.Tag, - libimage.EventTypeImageUntag: events.Untag, - libimage.EventTypeImageMount: events.Mount, - libimage.EventTypeImageUnmount: events.Unmount, + libimage.EventTypeImagePull: events.Pull, + libimage.EventTypeImagePullError: events.PullError, + libimage.EventTypeImagePush: events.Push, + libimage.EventTypeImageRemove: events.Remove, + libimage.EventTypeImageLoad: events.LoadFromArchive, + libimage.EventTypeImageSave: events.Save, + libimage.EventTypeImageTag: events.Tag, + libimage.EventTypeImageUntag: events.Untag, + libimage.EventTypeImageMount: events.Mount, + libimage.EventTypeImageUnmount: events.Unmount, } // libimageEvents spawns a goroutine which will listen for events on @@ -736,6 +737,9 @@ func (r *Runtime) libimageEvents() { Time: libimageEvent.Time, Type: events.Image, } + if libimageEvent.Error != nil { + e.Error = libimageEvent.Error.Error() + } if err := r.eventer.Write(e); err != nil { logrus.Errorf("Unable to write image event: %q", err) } @@ -822,7 +826,7 @@ func (r *Runtime) Shutdown(force bool) error { // Reconfigures the runtime after a reboot // Refreshes the state, recreating temporary files // Does not check validity as the runtime is not valid until after this has run -func (r *Runtime) refresh(alivePath string) error { +func (r *Runtime) refresh(ctx context.Context, alivePath string) error { logrus.Debugf("Podman detected system restart - performing state refresh") // Clear state of database if not running in container @@ -859,6 +863,22 @@ func (r *Runtime) refresh(alivePath string) error { if err := ctr.refresh(); err != nil { logrus.Errorf("Refreshing container %s: %v", ctr.ID(), err) } + // This is the only place it's safe to use ctr.state.State unlocked + // We're holding the alive lock, guaranteed to be the only Libpod on the system right now. 
+ if (ctr.AutoRemove() && ctr.state.State == define.ContainerStateExited) || ctr.state.State == define.ContainerStateRemoving { + opts := ctrRmOpts{ + // Don't force-remove, we're supposed to be fresh off a reboot + // If we have to force, something is seriously wrong + Force: false, + RemoveVolume: true, + } + // This container should have autoremoved before the + // reboot but did not. + // Get rid of it. + if _, _, err := r.removeContainer(ctx, ctr, opts); err != nil { + logrus.Errorf("Unable to remove container %s which should have autoremoved: %v", ctr.ID(), err) + } + } } for _, pod := range pods { if err := pod.refresh(); err != nil { @@ -1089,7 +1109,7 @@ func (r *Runtime) reloadContainersConf() error { // reloadStorageConf reloads the storage.conf func (r *Runtime) reloadStorageConf() error { - configFile, err := storage.DefaultConfigFile(rootless.IsRootless()) + configFile, err := storage.DefaultConfigFile() if err != nil { return err } diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_cstorage.go b/vendor/github.com/containers/podman/v5/libpod/runtime_cstorage.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/runtime_cstorage.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_cstorage.go index 86b6d065d..21078999b 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_cstorage.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_cstorage.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -8,7 +7,7 @@ import ( "fmt" "time" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/containers/storage" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_ctr.go b/vendor/github.com/containers/podman/v5/libpod/runtime_ctr.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/runtime_ctr.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_ctr.go index 9b13de5af..9501969d9 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_ctr.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_ctr.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -18,20 +17,20 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" - cutil "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/libpod/shutdown" - "github.com/containers/podman/v4/pkg/domain/entities/reports" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/libpod/shutdown" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/stringid" "github.com/docker/go-units" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // Contains the public Runtime API for containers @@ -257,7 +256,7 @@ func (r *Runtime)
setupContainer(ctx context.Context, ctr *Container) (_ *Contai for _, opts := range ctr.config.Networks { if opts.InterfaceName != "" { // check that no name is assigned to more than one network - if cutil.StringInSlice(opts.InterfaceName, usedIfNames) { + if slices.Contains(usedIfNames, opts.InterfaceName) { return nil, fmt.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName) } usedIfNames = append(usedIfNames, opts.InterfaceName) @@ -265,15 +264,22 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Contai } i := 0 for nameOrID, opts := range ctr.config.Networks { - netName, err := r.normalizeNetworkName(nameOrID) + netName, nicName, err := r.normalizeNetworkName(nameOrID) if err != nil { return nil, err } - // assign interface name if empty + + // check whether the interface is to be named as the network_interface + // when the name is left unspecified + if opts.InterfaceName == "" { + opts.InterfaceName = nicName + } + + // assign default interface name if empty if opts.InterfaceName == "" { for i < 100000 { ifName := fmt.Sprintf("eth%d", i) - if !cutil.StringInSlice(ifName, usedIfNames) { + if !slices.Contains(usedIfNames, ifName) { opts.InterfaceName = ifName usedIfNames = append(usedIfNames, ifName) break diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_ctr_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/runtime_ctr_freebsd.go similarity index 78% rename from vendor/github.com/containers/podman/v4/libpod/runtime_ctr_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_ctr_freebsd.go index 0f6f1b025..0f6b93938 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_ctr_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_ctr_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_ctr_linux.go b/vendor/github.com/containers/podman/v5/libpod/runtime_ctr_linux.go similarity index 78% rename from vendor/github.com/containers/podman/v4/libpod/runtime_ctr_linux.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_ctr_linux.go index 09082feba..d29b73516 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_ctr_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_ctr_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_img.go b/vendor/github.com/containers/podman/v5/libpod/runtime_img.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/runtime_img.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_img.go index c07e8cbaa..db70c4c10 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_img.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_img.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -14,9 +13,9 @@ import ( "github.com/containers/buildah/imagebuildah" "github.com/containers/common/libimage" "github.com/containers/image/v5/docker/reference" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/util" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_migrate_linux.go
b/vendor/github.com/containers/podman/v5/libpod/runtime_migrate_linux.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/runtime_migrate_linux.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_migrate_linux.go index 8a3793b8d..002acb09a 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_migrate_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_migrate_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -10,9 +9,9 @@ import ( "strconv" "syscall" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/util" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_migrate_unsupported.go b/vendor/github.com/containers/podman/v5/libpod/runtime_migrate_unsupported.go similarity index 92% rename from vendor/github.com/containers/podman/v4/libpod/runtime_migrate_unsupported.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_migrate_unsupported.go index af6e05c60..9a8c2fda7 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_migrate_unsupported.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_migrate_unsupported.go @@ -1,5 +1,4 @@ //go:build !remote && !linux -// +build !remote,!linux package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_pod.go b/vendor/github.com/containers/podman/v5/libpod/runtime_pod.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/runtime_pod.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_pod.go index 3fe88268d..86b0780df 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_pod.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_pod.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -9,8 +8,8 @@ import ( "fmt" "time" - "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" + "golang.org/x/exp/slices" ) // Contains the public Runtime API for pods @@ -146,7 +145,7 @@ func (r *Runtime) GetRunningPods() ([]*Pod, error) { } // Assemble running pods for _, c := range containers { - if !util.StringInSlice(c.PodID(), pods) { + if !slices.Contains(pods, c.PodID()) { pods = append(pods, c.PodID()) pod, err := r.GetPod(c.PodID()) if err != nil { diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_common.go b/vendor/github.com/containers/podman/v5/libpod/runtime_pod_common.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/runtime_pod_common.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_pod_common.go index 2aa9ea320..802026f90 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_pod_common.go @@ -1,6 +1,4 @@ //go:build !remote && (linux || freebsd) -// +build !remote -// +build linux freebsd package libpod @@ -9,9 +7,9 @@ import ( "errors" "fmt" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + 
"github.com/containers/podman/v5/pkg/specgen" "github.com/hashicorp/go-multierror" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/runtime_pod_freebsd.go similarity index 93% rename from vendor/github.com/containers/podman/v4/libpod/runtime_pod_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_pod_freebsd.go index 67d02385e..b78b9ec16 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_pod_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_linux.go b/vendor/github.com/containers/podman/v5/libpod/runtime_pod_linux.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/runtime_pod_linux.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_pod_linux.go index d67a624d9..b0f39b101 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_pod_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -11,9 +10,8 @@ import ( "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/utils" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -98,7 +96,7 @@ func (p *Pod) removePodCgroup() error { } logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath) - cgroup, err := utils.GetOwnCgroup() + cgroup, err := cgroups.GetOwnCgroup() if err != nil { return err } @@ -107,7 +105,7 @@ func (p *Pod) removePodCgroup() error { // current process out of it before the cgroup is destroyed. if isSubDir(cgroup, string(filepath.Separator)+p.state.CgroupPath) { parent := path.Dir(p.state.CgroupPath) - if err := utils.MoveUnderCgroup(parent, "cleanup", nil); err != nil { + if err := cgroups.MoveUnderCgroup(parent, "cleanup", nil); err != nil { return err } } diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_pre_go1.20.go b/vendor/github.com/containers/podman/v5/libpod/runtime_pre_go1.20.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/runtime_pre_go1.20.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_pre_go1.20.go index 3dcbe1808..94195f35c 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_pre_go1.20.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_pre_go1.20.go @@ -1,7 +1,6 @@ // In go 1.20 and later, the global RNG is automatically initialized. 
// Ref: https://pkg.go.dev/math/rand@go1.20#Seed //go:build !go1.20 && !remote -// +build !go1.20,!remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_renumber.go b/vendor/github.com/containers/podman/v5/libpod/runtime_renumber.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/runtime_renumber.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_renumber.go index f4d6c4319..953e83242 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_renumber.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_renumber.go @@ -1,13 +1,12 @@ //go:build !remote -// +build !remote package libpod import ( "fmt" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" ) // RenumberLocks reassigns lock numbers for all containers and pods in the diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_volume.go b/vendor/github.com/containers/podman/v5/libpod/runtime_volume.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/runtime_volume.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_volume.go index 292432b7b..6066145f0 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_volume.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_volume.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -7,9 +6,9 @@ import ( "context" "errors" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/domain/entities/reports" ) // Contains the public Runtime API for volumes diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_volume_common.go b/vendor/github.com/containers/podman/v5/libpod/runtime_volume_common.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/runtime_volume_common.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_volume_common.go index a254b599d..2f982fce7 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_volume_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/runtime_volume_common.go @@ -1,6 +1,4 @@ //go:build !remote && (linux || freebsd) -// +build !remote -// +build linux freebsd package libpod @@ -13,9 +11,9 @@ import ( "strings" "time" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - volplugin "github.com/containers/podman/v4/libpod/plugin" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + volplugin "github.com/containers/podman/v5/libpod/plugin" "github.com/containers/storage" "github.com/containers/storage/drivers/quota" "github.com/containers/storage/pkg/idtools" diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_worker.go b/vendor/github.com/containers/podman/v5/libpod/runtime_worker.go similarity index 94% rename from vendor/github.com/containers/podman/v4/libpod/runtime_worker.go rename to vendor/github.com/containers/podman/v5/libpod/runtime_worker.go index 0851d74a9..20e1f4e7d 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_worker.go +++ 
b/vendor/github.com/containers/podman/v5/libpod/runtime_worker.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/service.go b/vendor/github.com/containers/podman/v5/libpod/service.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/service.go rename to vendor/github.com/containers/podman/v5/libpod/service.go index fdc5f048e..e5cbb7b53 100644 --- a/vendor/github.com/containers/podman/v4/libpod/service.go +++ b/vendor/github.com/containers/podman/v5/libpod/service.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -8,7 +7,7 @@ import ( "errors" "fmt" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/shutdown/handler.go b/vendor/github.com/containers/podman/v5/libpod/shutdown/handler.go similarity index 100% rename from vendor/github.com/containers/podman/v4/libpod/shutdown/handler.go rename to vendor/github.com/containers/podman/v5/libpod/shutdown/handler.go diff --git a/vendor/github.com/containers/podman/v4/libpod/sqlite_state.go b/vendor/github.com/containers/podman/v5/libpod/sqlite_state.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/sqlite_state.go rename to vendor/github.com/containers/podman/v5/libpod/sqlite_state.go index b1d366aea..9a9f793e8 100644 --- a/vendor/github.com/containers/podman/v4/libpod/sqlite_state.go +++ b/vendor/github.com/containers/podman/v5/libpod/sqlite_state.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -15,8 +14,7 @@ import ( "time" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" "github.com/containers/storage" "github.com/sirupsen/logrus" @@ -42,7 +40,7 @@ const ( sqliteOptionForeignKeys = "&_foreign_keys=1" // Make sure that transactions happen exclusively. sqliteOptionTXLock = "&_txlock=exclusive" - // Make sure busy timeout is set to high value to keep retying when the db is locked. + // Make sure busy timeout is set to high value to keep retrying when the db is locked. // Timeout is in ms, so set it to 100s to have enough time to retry the operations. 
sqliteOptionBusyTimeout = "&_busy_timeout=100000" @@ -304,7 +302,7 @@ func (s *SQLiteState) ValidateDBConfig(runtime *Runtime) (defErr error) { return define.ErrDBClosed } - storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) + storeOpts, err := storage.DefaultStoreOptions() if err != nil { return err } @@ -961,8 +959,7 @@ func (s *SQLiteState) GetContainerExitCode(id string) (int32, error) { } row := s.conn.QueryRow("SELECT ExitCode FROM ContainerExitCode WHERE ID=?;", id) - - var exitCode int32 + var exitCode int32 = -1 if err := row.Scan(&exitCode); err != nil { if errors.Is(err, sql.ErrNoRows) { return -1, fmt.Errorf("getting exit code of container %s from DB: %w", id, define.ErrNoSuchExitCode) diff --git a/vendor/github.com/containers/podman/v4/libpod/sqlite_state_internal.go b/vendor/github.com/containers/podman/v5/libpod/sqlite_state_internal.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/sqlite_state_internal.go rename to vendor/github.com/containers/podman/v5/libpod/sqlite_state_internal.go index 1f2c88dee..7aa2048ed 100644 --- a/vendor/github.com/containers/podman/v4/libpod/sqlite_state_internal.go +++ b/vendor/github.com/containers/podman/v5/libpod/sqlite_state_internal.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -11,7 +10,7 @@ import ( "strings" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/sirupsen/logrus" // SQLite backend for database/sql diff --git a/vendor/github.com/containers/podman/v4/libpod/state.go b/vendor/github.com/containers/podman/v5/libpod/state.go similarity index 99% rename from vendor/github.com/containers/podman/v4/libpod/state.go rename to vendor/github.com/containers/podman/v5/libpod/state.go index b84ee06d5..ae9290bc5 100644 --- a/vendor/github.com/containers/podman/v4/libpod/state.go +++ b/vendor/github.com/containers/podman/v5/libpod/state.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/stats_common.go b/vendor/github.com/containers/podman/v5/libpod/stats_common.go similarity index 91% rename from vendor/github.com/containers/podman/v4/libpod/stats_common.go rename to vendor/github.com/containers/podman/v5/libpod/stats_common.go index 280aa0606..9d301a69d 100644 --- a/vendor/github.com/containers/podman/v4/libpod/stats_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/stats_common.go @@ -1,13 +1,11 @@ //go:build !remote && (linux || freebsd) -// +build !remote -// +build linux freebsd package libpod import ( "fmt" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" ) // GetContainerStats gets the running stats for a given container. 
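The sqlite_state.go hunk above is a reminder of how libpod's database options fit together: they are plain go-sqlite3 DSN parameters appended to the connection string. The following is a minimal sketch, not libpod's actual code; the `dbPath` value and the standalone `main` wrapper are assumptions for illustration:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // SQLite backend for database/sql
)

func main() {
	dbPath := "/tmp/example-state.db" // hypothetical database location
	// Same options as in the diff: enforce foreign keys, take transactions
	// exclusively, and keep retrying for up to 100s (value is in ms) when
	// another process holds the database lock.
	dsn := "file:" + dbPath +
		"?_foreign_keys=1" +
		"&_txlock=exclusive" +
		"&_busy_timeout=100000"

	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("database opened with libpod-style options")
}
```

A generous busy timeout matters here because several Podman processes may share one state database; failing fast would surface spurious "database is locked" errors instead of retrying.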
@@ -43,6 +41,12 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de } } + netStats, err := getContainerNetIO(c) + if err != nil { + return nil, err + } + stats.Network = netStats + if err := c.getPlatformContainerStats(stats, previousStats); err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v4/libpod/stats_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/stats_freebsd.go similarity index 91% rename from vendor/github.com/containers/podman/v4/libpod/stats_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/stats_freebsd.go index 22141906c..9dbb8b12f 100644 --- a/vendor/github.com/containers/podman/v4/libpod/stats_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/stats_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -8,8 +7,8 @@ import ( "math" "time" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rctl" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rctl" "github.com/containers/storage/pkg/system" "github.com/sirupsen/logrus" ) @@ -81,20 +80,6 @@ func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, prev stats.MemLimit = c.getMemLimit() stats.SystemNano = now - netStats, err := getContainerNetIO(c) - if err != nil { - return err - } - - // Handle case where the container is not in a network namespace - if netStats != nil { - stats.NetInput = netStats.RxBytes - stats.NetOutput = netStats.TxBytes - } else { - stats.NetInput = 0 - stats.NetOutput = 0 - } - return nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/stats_linux.go b/vendor/github.com/containers/podman/v5/libpod/stats_linux.go similarity index 92% rename from vendor/github.com/containers/podman/v4/libpod/stats_linux.go rename to vendor/github.com/containers/podman/v5/libpod/stats_linux.go index 1254e4867..19ce554cd 100644 --- a/vendor/github.com/containers/podman/v4/libpod/stats_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/stats_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -12,7 +11,7 @@ import ( runccgroup "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/containers/common/pkg/cgroups" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "golang.org/x/sys/unix" ) @@ -40,10 +39,6 @@ func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, prev return fmt.Errorf("unable to obtain cgroup stats: %w", err) } conState := c.state.State - netStats, err := getContainerNetIO(c) - if err != nil { - return err - } // If the current total usage in the cgroup is less than what was previously // recorded then it means the container was restarted and runs in a new cgroup @@ -70,14 +65,6 @@ func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, prev stats.CPUSystemNano = cgroupStats.CpuStats.CpuUsage.UsageInKernelmode stats.SystemNano = now stats.PerCPU = cgroupStats.CpuStats.CpuUsage.PercpuUsage - // Handle case where the container is not in a network namespace - if netStats != nil { - stats.NetInput = netStats.RxBytes - stats.NetOutput = netStats.TxBytes - } else { - stats.NetInput = 0 - stats.NetOutput = 0 - } return nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/storage.go b/vendor/github.com/containers/podman/v5/libpod/storage.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/storage.go 
rename to vendor/github.com/containers/podman/v5/libpod/storage.go index 52d5e1e8f..b9db7921f 100644 --- a/vendor/github.com/containers/podman/v4/libpod/storage.go +++ b/vendor/github.com/containers/podman/v5/libpod/storage.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -10,7 +9,7 @@ import ( istorage "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -66,21 +65,17 @@ func (metadata *RuntimeContainerMetadata) SetMountLabel(mountLabel string) { metadata.MountLabel = mountLabel } -// CreateContainerStorage creates the storage end of things. We already have the container spec created +// CreateContainerStorage creates the storage end of things. We already have the container spec created. +// imageID and imageName must both be either empty or non-empty. // TO-DO We should be passing in an Image object in the future. func (r *storageService) CreateContainerStorage(ctx context.Context, systemContext *types.SystemContext, imageName, imageID, containerName, containerID string, options storage.ContainerOptions) (_ ContainerInfo, retErr error) { var imageConfig *v1.Image - if imageName != "" { - var ref types.ImageReference + if imageID != "" { if containerName == "" { return ContainerInfo{}, define.ErrEmptyID } // Check if we have the specified image. - ref, err := istorage.Transport.ParseStoreReference(r.store, imageID) - if err != nil { - return ContainerInfo{}, err - } - _, img, err := istorage.ResolveReference(ref) + ref, err := istorage.Transport.NewStoreReference(r.store, nil, imageID) if err != nil { return ContainerInfo{}, err } @@ -96,12 +91,6 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte if err != nil { return ContainerInfo{}, err } - - // Update the image name and ID. - if imageName == "" && len(img.Names) > 0 { - imageName = img.Names[0] - } - imageID = img.ID } // Build metadata to store with the container. 
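The storage.go hunk above changes the lookup contract: v5 resolves the backing image strictly by `imageID`, building the store reference directly with `NewStoreReference` rather than parsing a name-based reference and re-resolving it. Below is a rough sketch of that v5-style lookup; the package and function names are hypothetical, the nil `SystemContext` is an assumption, and error handling is condensed:

```go
package storageexample

import (
	"context"

	istorage "github.com/containers/image/v5/storage"
	"github.com/containers/storage"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// lookupImageConfig is a hypothetical helper showing the v5-style lookup:
// the store reference is built straight from the image ID, no name parsing.
func lookupImageConfig(ctx context.Context, store storage.Store, imageID string) (*v1.Image, error) {
	ref, err := istorage.Transport.NewStoreReference(store, nil, imageID)
	if err != nil {
		return nil, err
	}
	img, err := ref.NewImage(ctx, nil) // nil falls back to the default SystemContext
	if err != nil {
		return nil, err
	}
	defer img.Close()
	return img.OCIConfig(ctx)
}
```

This also explains the new doc comment in the hunk: since the ID drives the lookup, callers must pass `imageID` and `imageName` either both set or both empty.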
diff --git a/vendor/github.com/containers/podman/v4/libpod/util.go b/vendor/github.com/containers/podman/v5/libpod/util.go similarity index 95% rename from vendor/github.com/containers/podman/v4/libpod/util.go rename to vendor/github.com/containers/podman/v5/libpod/util.go index ed7c1260f..4bd4dd62c 100644 --- a/vendor/github.com/containers/podman/v4/libpod/util.go +++ b/vendor/github.com/containers/podman/v5/libpod/util.go @@ -1,11 +1,11 @@ //go:build !remote -// +build !remote package libpod import ( "bufio" "encoding/binary" + "errors" "fmt" "io" "net/http" @@ -18,11 +18,12 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/api/handlers/utils/apiutil" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/api/handlers/utils/apiutil" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) // FuncTimer helps measure the execution time of a function @@ -216,7 +217,7 @@ func writeHijackHeader(r *http.Request, conn io.Writer, tty bool) { // Upgraded fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: %s\r\nConnection: Upgrade\r\nUpgrade: %s\r\n\r\n", - proto, header) + header, proto) } } @@ -234,8 +235,12 @@ func makeInspectPorts(bindings []types.PortMapping, expose map[uint16][]string) for i := uint16(0); i < port.Range; i++ { key := fmt.Sprintf("%d/%s", port.ContainerPort+i, protocol) hostPorts := portBindings[key] + var hostIP = port.HostIP + if len(port.HostIP) == 0 { + hostIP = "0.0.0.0" + } hostPorts = append(hostPorts, define.InspectHostPort{ - HostIP: port.HostIP, + HostIP: hostIP, HostPort: strconv.FormatUint(uint64(port.HostPort+i), 10), }) portBindings[key] = hostPorts @@ -273,6 +278,10 @@ func writeStringToPath(path, contents, mountLabel string, uid, gid int) error { } // Relabel runDirResolv for the container if err := label.Relabel(path, mountLabel, false); err != nil { + if errors.Is(err, unix.ENOTSUP) { + logrus.Debugf("Labeling not supported on %q", path) + return nil + } return err } diff --git a/vendor/github.com/containers/podman/v4/libpod/util_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/util_freebsd.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/util_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/util_freebsd.go index 20914b531..c732a93ec 100644 --- a/vendor/github.com/containers/podman/v4/libpod/util_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/util_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/util_linux.go b/vendor/github.com/containers/podman/v5/libpod/util_linux.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/util_linux.go rename to vendor/github.com/containers/podman/v5/libpod/util_linux.go index ac5fdeeb1..1d8347592 100644 --- a/vendor/github.com/containers/podman/v4/libpod/util_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/util_linux.go @@ -1,9 +1,9 @@ //go:build !remote -// +build !remote package libpod import ( + "errors" "fmt" "os" "path/filepath" @@ -11,8 +11,8 @@ import ( "syscall" "github.com/containers/common/pkg/cgroups" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" + 
"github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" @@ -146,7 +146,7 @@ func LabelVolumePath(path, mountLabel string) error { } if err := lvpRelabel(path, mountLabel, true); err != nil { - if err == syscall.ENOTSUP { + if errors.Is(err, unix.ENOTSUP) { logrus.Debugf("Labeling not supported on %q", path) } else { return fmt.Errorf("setting selinux label for %s to %q as shared: %w", path, mountLabel, err) diff --git a/vendor/github.com/containers/podman/v4/libpod/volume.go b/vendor/github.com/containers/podman/v5/libpod/volume.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/volume.go rename to vendor/github.com/containers/podman/v5/libpod/volume.go index 0d8a17963..4b5a224f8 100644 --- a/vendor/github.com/containers/podman/v4/libpod/volume.go +++ b/vendor/github.com/containers/podman/v5/libpod/volume.go @@ -1,14 +1,13 @@ //go:build !remote -// +build !remote package libpod import ( "time" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/lock" - "github.com/containers/podman/v4/libpod/plugin" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/lock" + "github.com/containers/podman/v5/libpod/plugin" "github.com/containers/storage/pkg/directory" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/volume_inspect.go b/vendor/github.com/containers/podman/v5/libpod/volume_inspect.go similarity index 96% rename from vendor/github.com/containers/podman/v4/libpod/volume_inspect.go rename to vendor/github.com/containers/podman/v5/libpod/volume_inspect.go index 991634914..f4a3cc889 100644 --- a/vendor/github.com/containers/podman/v4/libpod/volume_inspect.go +++ b/vendor/github.com/containers/podman/v5/libpod/volume_inspect.go @@ -1,12 +1,11 @@ //go:build !remote -// +build !remote package libpod import ( "fmt" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" pluginapi "github.com/docker/go-plugins-helpers/volume" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/volume_internal.go b/vendor/github.com/containers/podman/v5/libpod/volume_internal.go similarity index 97% rename from vendor/github.com/containers/podman/v4/libpod/volume_internal.go rename to vendor/github.com/containers/podman/v5/libpod/volume_internal.go index 7b86c2292..3d308c862 100644 --- a/vendor/github.com/containers/podman/v4/libpod/volume_internal.go +++ b/vendor/github.com/containers/podman/v5/libpod/volume_internal.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod @@ -8,7 +7,7 @@ import ( "os" "path/filepath" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" ) // Creates a new volume diff --git a/vendor/github.com/containers/podman/v4/libpod/volume_internal_common.go b/vendor/github.com/containers/podman/v5/libpod/volume_internal_common.go similarity index 98% rename from vendor/github.com/containers/podman/v4/libpod/volume_internal_common.go rename to vendor/github.com/containers/podman/v5/libpod/volume_internal_common.go index 06c9dd53a..c9a20820f 100644 --- a/vendor/github.com/containers/podman/v4/libpod/volume_internal_common.go +++ b/vendor/github.com/containers/podman/v5/libpod/volume_internal_common.go @@ -1,6 +1,4 @@ //go:build !remote && (linux || freebsd) -// 
+build !remote -// +build linux freebsd package libpod @@ -10,7 +8,7 @@ import ( "os/exec" "strings" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" pluginapi "github.com/docker/go-plugins-helpers/volume" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" diff --git a/vendor/github.com/containers/podman/v4/libpod/volume_internal_freebsd.go b/vendor/github.com/containers/podman/v5/libpod/volume_internal_freebsd.go similarity index 90% rename from vendor/github.com/containers/podman/v4/libpod/volume_internal_freebsd.go rename to vendor/github.com/containers/podman/v5/libpod/volume_internal_freebsd.go index d0d9df77e..7e16429ea 100644 --- a/vendor/github.com/containers/podman/v4/libpod/volume_internal_freebsd.go +++ b/vendor/github.com/containers/podman/v5/libpod/volume_internal_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/volume_internal_linux.go b/vendor/github.com/containers/podman/v5/libpod/volume_internal_linux.go similarity index 90% rename from vendor/github.com/containers/podman/v4/libpod/volume_internal_linux.go rename to vendor/github.com/containers/podman/v5/libpod/volume_internal_linux.go index e21fd97a3..4c84e27fd 100644 --- a/vendor/github.com/containers/podman/v4/libpod/volume_internal_linux.go +++ b/vendor/github.com/containers/podman/v5/libpod/volume_internal_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package libpod diff --git a/vendor/github.com/containers/podman/v4/pkg/annotations/annotations.go b/vendor/github.com/containers/podman/v5/pkg/annotations/annotations.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/annotations/annotations.go rename to vendor/github.com/containers/podman/v5/pkg/annotations/annotations.go diff --git a/vendor/github.com/containers/podman/v5/pkg/annotations/validate.go b/vendor/github.com/containers/podman/v5/pkg/annotations/validate.go new file mode 100644 index 000000000..4ddeea30e --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/annotations/validate.go @@ -0,0 +1,124 @@ +package annotations + +import ( + "fmt" + "regexp" + "strings" + + "github.com/containers/podman/v5/libpod/define" +) + +// regexErrorMsg returns a string explanation of a regex validation failure. +func regexErrorMsg(msg string, fmt string, examples ...string) string { + if len(examples) == 0 { + return msg + " (regex used for validation is '" + fmt + "')" + } + msg += " (e.g. " + for i := range examples { + if i > 0 { + msg += " or " + } + msg += "'" + examples[i] + "', " + } + msg += "regex used for validation is '" + fmt + "')" + return msg +} + +const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" +const dns1123SubdomainErrorMsg string = "annotations must be formatted as a valid lowercase RFC1123 subdomain of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + +// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) +const DNS1123SubdomainMaxLength int = 253 + +var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") + +// isDNS1123Subdomain tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123). 
+func isDNS1123Subdomain(value string) error { + if len(value) > DNS1123SubdomainMaxLength { + return fmt.Errorf("prefix part must be no more than %d characters", DNS1123SubdomainMaxLength) + } + + if !dns1123SubdomainRegexp.MatchString(value) { + return fmt.Errorf(regexErrorMsg(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) + } + + return nil +} + +const qnameCharFmt string = "[A-Za-z0-9]" +const qnameExtCharFmt string = "[-A-Za-z0-9_.]" +const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt +const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const qualifiedNameMaxLength int = 63 + +var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") + +// isQualifiedName tests whether the value passed is what Kubernetes calls a +// "qualified name". This is a format used in various places throughout the +// system. If the value is not valid, an error describing the violation is +// returned; otherwise nil is returned. +func isQualifiedName(value string) error { + parts := strings.Split(value, "/") + var name string + + switch len(parts) { + case 1: + name = parts[0] + case 2: + var prefix string + prefix, name = parts[0], parts[1] + if len(prefix) == 0 { + return fmt.Errorf("prefix part of %s must be non-empty", value) + } else if err := isDNS1123Subdomain(prefix); err != nil { + return err + } + default: + return fmt.Errorf("a qualified name of %s "+ + regexErrorMsg(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ + " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')", value) + } + + if len(name) == 0 { + return fmt.Errorf("name part of %s must be non-empty", value) + } else if len(name) > qualifiedNameMaxLength { + return fmt.Errorf("name part of %s must be no more than %d characters", value, qualifiedNameMaxLength) + } + + if !qualifiedNameRegexp.MatchString(name) { + return fmt.Errorf("name part of %s "+ + regexErrorMsg(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc"), value) + } + + return nil +} + +func validateAnnotationsSize(annotations map[string]string) error { + var totalSize int64 + for k, v := range annotations { + totalSize += (int64)(len(k)) + (int64)(len(v)) + } + if totalSize > (int64)(define.TotalAnnotationSizeLimitB) { + return fmt.Errorf("annotations size %d is larger than limit %d", totalSize, define.TotalAnnotationSizeLimitB) + } + return nil +} + +// ValidateAnnotations validates that a set of annotations is correctly +// defined. +func ValidateAnnotations(annotations map[string]string) error { + for k := range annotations { + // The rule is QualifiedName except that case doesn't matter, + // so convert to lowercase before checking.
+ if err := isQualifiedName(strings.ToLower(k)); err != nil { + return err + } + } + + if err := validateAnnotationsSize(annotations); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/changes.go b/vendor/github.com/containers/podman/v5/pkg/api/handlers/changes.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/api/handlers/changes.go rename to vendor/github.com/containers/podman/v5/pkg/api/handlers/changes.go diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go b/vendor/github.com/containers/podman/v5/pkg/api/handlers/decoder.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go rename to vendor/github.com/containers/podman/v5/pkg/api/handlers/decoder.go index 4a636bc81..4750fa1c5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go +++ b/vendor/github.com/containers/podman/v5/pkg/api/handlers/decoder.go @@ -7,8 +7,8 @@ import ( "syscall" "time" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/util" "github.com/gorilla/schema" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go b/vendor/github.com/containers/podman/v5/pkg/api/handlers/types.go similarity index 90% rename from vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go rename to vendor/github.com/containers/podman/v5/pkg/api/handlers/types.go index 1a14628c6..194a8d391 100644 --- a/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/api/handlers/types.go @@ -1,15 +1,17 @@ package handlers import ( - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" docker "github.com/docker/docker/api/types" + dockerBackend "github.com/docker/docker/api/types/backend" dockerContainer "github.com/docker/docker/api/types/container" dockerNetwork "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" "github.com/opencontainers/runtime-spec/specs-go" ) type AuthConfig struct { - docker.AuthConfig + registry.AuthConfig } type ImageInspect struct { @@ -32,6 +34,12 @@ type LibpodImagesRemoveReport struct { Errors []string } +// LibpodImagesResolveReport includes a list of fully-qualified image references. +type LibpodImagesResolveReport struct { + // Fully-qualified image references. 
+ Names []string +} + type ContainersPruneReport struct { docker.ContainersPruneReport } @@ -79,7 +87,7 @@ type Info struct { type Container struct { docker.Container - docker.ContainerCreateConfig + dockerBackend.ContainerCreateConfig } type DiskUsage struct { diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/types/types.go b/vendor/github.com/containers/podman/v5/pkg/api/handlers/types/types.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/api/handlers/types/types.go rename to vendor/github.com/containers/podman/v5/pkg/api/handlers/types/types.go index 71e1d5024..7329fc7a7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/api/handlers/types/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/api/handlers/types/types.go @@ -1,6 +1,6 @@ package types -import "github.com/containers/podman/v4/pkg/domain/entities" +import "github.com/containers/podman/v5/pkg/domain/entities" // LibpodImagesRemoveReport is the return type for image removal via the rest // api. diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/utils/apiutil/apiutil.go b/vendor/github.com/containers/podman/v5/pkg/api/handlers/utils/apiutil/apiutil.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/api/handlers/utils/apiutil/apiutil.go rename to vendor/github.com/containers/podman/v5/pkg/api/handlers/utils/apiutil/apiutil.go index b33627e31..48f4dfba5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/api/handlers/utils/apiutil/apiutil.go +++ b/vendor/github.com/containers/podman/v5/pkg/api/handlers/utils/apiutil/apiutil.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/blang/semver/v4" - "github.com/containers/podman/v4/version" + "github.com/containers/podman/v5/version" "github.com/gorilla/mux" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/auth/auth.go b/vendor/github.com/containers/podman/v5/pkg/auth/auth.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/auth/auth.go rename to vendor/github.com/containers/podman/v5/pkg/auth/auth.go index 12ea2458b..ecbf4d3ae 100644 --- a/vendor/github.com/containers/podman/v4/pkg/auth/auth.go +++ b/vendor/github.com/containers/podman/v5/pkg/auth/auth.go @@ -268,7 +268,7 @@ func normalizeAuthFileKey(authFileKey string) string { stripped = strings.TrimPrefix(stripped, "https://") if stripped != authFileKey { // URLs are interpreted to mean complete registries - stripped = strings.SplitN(stripped, "/", 2)[0] + stripped, _, _ = strings.Cut(stripped, "/") } // Only non-namespaced registry names (or URLs) need to be normalized; repo namespaces diff --git a/vendor/github.com/containers/podman/v4/pkg/autoupdate/autoupdate.go b/vendor/github.com/containers/podman/v5/pkg/autoupdate/autoupdate.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/autoupdate/autoupdate.go rename to vendor/github.com/containers/podman/v5/pkg/autoupdate/autoupdate.go index b979b92e1..5b2ac829f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/autoupdate/autoupdate.go +++ b/vendor/github.com/containers/podman/v5/pkg/autoupdate/autoupdate.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package autoupdate @@ -12,12 +11,12 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - 
"github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/systemd" - systemdDefine "github.com/containers/podman/v4/pkg/systemd/define" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/systemd" + systemdDefine "github.com/containers/podman/v5/pkg/systemd/define" "github.com/coreos/go-systemd/v22/dbus" "github.com/sirupsen/logrus" ) @@ -317,7 +316,7 @@ func (t *task) localUpdateAvailable() (bool, error) { if err != nil { return false, err } - return localImg.Digest().String() != t.image.Digest().String(), nil + return localImg.ID() != t.image.ID(), nil } // rollbackImage rolls back the task's image to the previous version before the update. diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/README.md b/vendor/github.com/containers/podman/v5/pkg/bindings/README.md similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/README.md rename to vendor/github.com/containers/podman/v5/pkg/bindings/README.md index 55255a22a..773de5e8c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/README.md +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/README.md @@ -43,7 +43,7 @@ import ( "fmt" "os" - "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings" ) func main() { @@ -74,8 +74,8 @@ import ( "fmt" "os" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/bindings/containers" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings/containers" ) func main() { @@ -102,8 +102,8 @@ import ( "fmt" "os" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/bindings/images" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings/images" ) func main() { @@ -130,10 +130,10 @@ import ( "fmt" "os" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/bindings/containers" - "github.com/containers/podman/v4/pkg/bindings/images" - "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings/containers" + "github.com/containers/podman/v5/pkg/bindings/images" + "github.com/containers/podman/v5/pkg/specgen" ) func main() { @@ -239,3 +239,6 @@ $ You can also verify that the information being passed back and forth is correct by putting with a tool like `socat`, which can dump what the socket is seeing. + +## Reducing Binary Size with "remote" Build Tag +When building a program that uses the Podman Go bindings, you can reduce the binary size by passing the "remote" build tag to the go build command. This tag excludes code related to local Podman operations, which is not needed for applications that only interact with Podman over a network. 
diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/connection.go b/vendor/github.com/containers/podman/v5/pkg/bindings/connection.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/bindings/connection.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/connection.go index d3bd5e8a8..d2c3aaba2 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/connection.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/connection.go @@ -15,7 +15,7 @@ import ( "github.com/blang/semver/v4" "github.com/containers/common/pkg/ssh" - "github.com/containers/podman/v4/version" + "github.com/containers/podman/v5/version" "github.com/sirupsen/logrus" "golang.org/x/net/proxy" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/archive.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/archive.go similarity index 79% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/archive.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/archive.go index 660d9da6b..f24efdf65 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/archive.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/archive.go @@ -7,15 +7,15 @@ import ( "net/http" "net/url" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/copy" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/copy" + "github.com/containers/podman/v5/pkg/domain/entities/types" ) // Stat checks if the specified path is on the container. Note that the stat // report may be set even in case of an error. This happens when the path // resolves to symlink pointing to a non-existent path. 
-func Stat(ctx context.Context, nameOrID string, path string) (*entities.ContainerStatReport, error) { +func Stat(ctx context.Context, nameOrID string, path string) (*types.ContainerStatReport, error) { conn, err := bindings.GetClient(ctx) if err != nil { return nil, err @@ -36,7 +36,7 @@ func Stat(ctx context.Context, nameOrID string, path string) (*entities.Containe finalErr = errors.New(response.Status) } - var statReport *entities.ContainerStatReport + var statReport *types.ContainerStatReport fileInfo, err := copy.ExtractFileInfoFromHeader(&response.Header) if err != nil && finalErr == nil { @@ -44,18 +44,18 @@ func Stat(ctx context.Context, nameOrID string, path string) (*entities.Containe } if fileInfo != nil { - statReport = &entities.ContainerStatReport{FileInfo: *fileInfo} + statReport = &types.ContainerStatReport{FileInfo: *fileInfo} } return statReport, finalErr } -func CopyFromArchive(ctx context.Context, nameOrID string, path string, reader io.Reader) (entities.ContainerCopyFunc, error) { +func CopyFromArchive(ctx context.Context, nameOrID string, path string, reader io.Reader) (types.ContainerCopyFunc, error) { return CopyFromArchiveWithOptions(ctx, nameOrID, path, reader, nil) } // CopyFromArchiveWithOptions copy files into container -func CopyFromArchiveWithOptions(ctx context.Context, nameOrID string, path string, reader io.Reader, options *CopyOptions) (entities.ContainerCopyFunc, error) { +func CopyFromArchiveWithOptions(ctx context.Context, nameOrID string, path string, reader io.Reader, options *CopyOptions) (types.ContainerCopyFunc, error) { conn, err := bindings.GetClient(ctx) if err != nil { return nil, err @@ -82,7 +82,7 @@ func CopyFromArchiveWithOptions(ctx context.Context, nameOrID string, path strin } // CopyToArchive copy files from container -func CopyToArchive(ctx context.Context, nameOrID string, path string, writer io.Writer) (entities.ContainerCopyFunc, error) { +func CopyToArchive(ctx context.Context, nameOrID string, path string, writer io.Writer) (types.ContainerCopyFunc, error) { conn, err := bindings.GetClient(ctx) if err != nil { return nil, err diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/attach.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/attach.go index e1c22d9e6..94219174b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/attach.go @@ -16,8 +16,8 @@ import ( "time" "github.com/containers/common/pkg/detach" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/bindings" "github.com/moby/term" "github.com/sirupsen/logrus" terminal "golang.org/x/term" diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/checkpoint.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/checkpoint.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/checkpoint.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/checkpoint.go index 8c072f588..fa0ae97cf 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/checkpoint.go +++ 
b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/checkpoint.go @@ -6,14 +6,14 @@ import ( "net/http" "os" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/types" ) // Checkpoint checkpoints the given container (identified by nameOrID). All additional // options are optional and allow for more fine grained control of the checkpoint process. -func Checkpoint(ctx context.Context, nameOrID string, options *CheckpointOptions) (*entities.CheckpointReport, error) { - var report entities.CheckpointReport +func Checkpoint(ctx context.Context, nameOrID string, options *CheckpointOptions) (*types.CheckpointReport, error) { + var report types.CheckpointReport if options == nil { options = new(CheckpointOptions) } @@ -52,13 +52,13 @@ func Checkpoint(ctx context.Context, nameOrID string, options *CheckpointOptions return nil, err } - return &entities.CheckpointReport{}, nil + return &types.CheckpointReport{}, nil } // Restore restores a checkpointed container to running. The container is identified by the nameOrID option. All // additional options are optional and allow finer control of the restore process. -func Restore(ctx context.Context, nameOrID string, options *RestoreOptions) (*entities.RestoreReport, error) { - var report entities.RestoreReport +func Restore(ctx context.Context, nameOrID string, options *RestoreOptions) (*types.RestoreReport, error) { + var report types.RestoreReport if options == nil { options = new(RestoreOptions) } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/commit.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/commit.go similarity index 89% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/commit.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/commit.go index 6d094a2ff..76b22d1f0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/commit.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/commit.go @@ -9,28 +9,28 @@ import ( "os" "strings" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/bindings/images" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings/images" "github.com/containers/storage/pkg/regexp" + dockerAPI "github.com/docker/docker/api/types" ) var iidRegex = regexp.Delayed(`^[0-9a-f]{12}`) // Commit creates a container image from a container. The container is defined by nameOrID. Use // the CommitOptions for finer grain control on characteristics of the resulting image.
-func Commit(ctx context.Context, nameOrID string, options *CommitOptions) (entities.IDResponse, error) { +func Commit(ctx context.Context, nameOrID string, options *CommitOptions) (dockerAPI.IDResponse, error) { if options == nil { options = new(CommitOptions) } - id := entities.IDResponse{} + id := dockerAPI.IDResponse{} conn, err := bindings.GetClient(ctx) if err != nil { return id, err } params, err := options.ToParams() if err != nil { - return entities.IDResponse{}, err + return dockerAPI.IDResponse{}, err } params.Set("container", nameOrID) var requestBody io.Reader diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/containers.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/containers.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/containers.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/containers.go index 0ca579a6c..19fc74f73 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/containers.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/containers.go @@ -9,11 +9,11 @@ import ( "net/url" "strings" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/api/handlers" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/api/handlers" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/domain/entities/types" ) var ( @@ -25,7 +25,7 @@ var ( // the most recent number of containers. The pod and size booleans indicate that pod information and rootfs // size information should also be included. Finally, the sync bool synchronizes the OCI runtime and // container state. 
-func List(ctx context.Context, options *ListOptions) ([]entities.ListContainer, error) { +func List(ctx context.Context, options *ListOptions) ([]types.ListContainer, error) { if options == nil { options = new(ListOptions) } @@ -33,7 +33,7 @@ func List(ctx context.Context, options *ListOptions) ([]entities.ListContainer, if err != nil { return nil, err } - var containers []entities.ListContainer + var containers []types.ListContainer params, err := options.ToParams() if err != nil { return nil, err @@ -218,7 +218,7 @@ func Start(ctx context.Context, nameOrID string, options *StartOptions) error { return response.Process(nil) } -func Stats(ctx context.Context, containers []string, options *StatsOptions) (chan entities.ContainerStatsReport, error) { +func Stats(ctx context.Context, containers []string, options *StatsOptions) (chan types.ContainerStatsReport, error) { if options == nil { options = new(StatsOptions) } @@ -243,7 +243,7 @@ func Stats(ctx context.Context, containers []string, options *StatsOptions) (cha return nil, response.Process(nil) } - statsChan := make(chan entities.ContainerStatsReport) + statsChan := make(chan types.ContainerStatsReport) go func() { defer close(statsChan) @@ -263,9 +263,9 @@ func Stats(ctx context.Context, containers []string, options *StatsOptions) (cha // fall through and do some work } - var report entities.ContainerStatsReport + var report types.ContainerStatsReport if err := dec.Decode(&report); err != nil { - report = entities.ContainerStatsReport{Error: err} + report = types.ContainerStatsReport{Error: err} } statsChan <- report diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/create.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/create.go similarity index 71% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/create.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/create.go index 9c090f67d..7b39e848d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/create.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/create.go @@ -5,14 +5,14 @@ import ( "net/http" "strings" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/specgen" jsoniter "github.com/json-iterator/go" ) -func CreateWithSpec(ctx context.Context, s *specgen.SpecGenerator, options *CreateOptions) (entities.ContainerCreateResponse, error) { - var ccr entities.ContainerCreateResponse +func CreateWithSpec(ctx context.Context, s *specgen.SpecGenerator, options *CreateOptions) (types.ContainerCreateResponse, error) { + var ccr types.ContainerCreateResponse if options == nil { options = new(CreateOptions) } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/diff.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/diff.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/diff.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/diff.go index 2eb13088c..e782aa920 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/diff.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/diff.go @@ -4,7 +4,7 @@ import ( "context" "net/http" - 
"github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings" "github.com/containers/storage/pkg/archive" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/exec.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/exec.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/exec.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/exec.go index 395ef06dd..e471a58dd 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/exec.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/exec.go @@ -8,10 +8,10 @@ import ( "net/http" "strings" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/api/handlers" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/api/handlers" + "github.com/containers/podman/v5/pkg/bindings" + dockerAPI "github.com/docker/docker/api/types" jsoniter "github.com/json-iterator/go" "github.com/sirupsen/logrus" ) @@ -43,7 +43,7 @@ func ExecCreate(ctx context.Context, nameOrID string, config *handlers.ExecCreat } defer resp.Body.Close() - respStruct := new(entities.IDResponse) + respStruct := new(dockerAPI.IDResponse) if err := resp.Process(respStruct); err != nil { return "", err } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/healthcheck.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/healthcheck.go similarity index 87% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/healthcheck.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/healthcheck.go index e0680238d..7f396fcc5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/healthcheck.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/healthcheck.go @@ -4,8 +4,8 @@ import ( "context" "net/http" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/bindings" ) // RunHealthCheck executes the container's healthcheck and returns the health status of the diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/logs.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/logs.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/logs.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/logs.go index 9d3fdb8eb..141c7733a 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/logs.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/logs.go @@ -8,7 +8,7 @@ import ( "net/http" "strconv" - "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings" ) // Logs obtains a container's logs given the options provided. 
The logs are then sent to the diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/mount.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/mount.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/mount.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/mount.go index de286e4b4..63aaf5c11 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/mount.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/mount.go @@ -4,7 +4,7 @@ import ( "context" "net/http" - "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings" ) // Mount mounts an existing container to the filesystem. It returns the path diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/rename.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/rename.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/rename.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/rename.go index 7cc16e334..4642547bf 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/rename.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/rename.go @@ -4,7 +4,7 @@ import ( "context" "net/http" - "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings" ) // Rename an existing container. diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_unix.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/term_unix.go similarity index 87% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_unix.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/term_unix.go index e14f50813..6c9f2a4b9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_unix.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/term_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package containers @@ -8,7 +7,7 @@ import ( "os" "os/signal" - sig "github.com/containers/podman/v4/pkg/signal" + sig "github.com/containers/podman/v5/pkg/signal" "golang.org/x/term" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_windows.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/term_windows.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_windows.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/term_windows.go index e710e2998..1be8f2d68 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/term_windows.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/term_windows.go @@ -5,7 +5,7 @@ import ( "os" "time" - sig "github.com/containers/podman/v4/pkg/signal" + sig "github.com/containers/podman/v5/pkg/signal" "golang.org/x/sys/windows" "golang.org/x/term" ) @@ -61,7 +61,6 @@ func notifyWinChange(ctx context.Context, winChange chan os.Signal, stdin *os.Fi } } }() - } func getTermSize(stdin *os.File, stdout *os.File) (width, height int, err error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go rename to 
vendor/github.com/containers/podman/v5/pkg/bindings/containers/types.go index ee2fe4b94..ba3703cee 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types.go @@ -4,7 +4,7 @@ import ( "bufio" "io" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" ) // LogOptions describe finer control of log content or diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_attach_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_attach_options.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_attach_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_attach_options.go index 9fa21ef4e..6c272f33c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_attach_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_attach_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_checkpoint_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_checkpoint_options.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_checkpoint_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_checkpoint_options.go index d5f6e541d..16bd91a72 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_checkpoint_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_checkpoint_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_commit_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_commit_options.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_commit_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_commit_options.go index 20e59f4d5..c74e2bab5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_commit_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_commit_options.go @@ -5,7 +5,7 @@ import ( "io" "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_copy_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_copy_options.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_copy_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_copy_options.go index e43d79752..b6cf40d70 100644 --- 
a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_copy_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_copy_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_create_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_create_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_create_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_create_options.go index 57896b9ef..099685ec3 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_create_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_create_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_diff_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_diff_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_diff_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_diff_options.go index 5fc3dedae..75f7907d9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_diff_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_diff_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execinspect_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execinspect_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execinspect_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execinspect_options.go index 0e926be0c..e96311834 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execinspect_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execinspect_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execremove_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execremove_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execremove_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execremove_options.go index 3dcebe300..cfc6d8a4e 100644 --- 
a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execremove_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execremove_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstart_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execstart_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstart_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execstart_options.go index 4bd66e25b..bbfb4a304 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstart_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execstart_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstartandattach_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execstartandattach_options.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstartandattach_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execstartandattach_options.go index 759676f2f..1b8e48725 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_execstartandattach_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_execstartandattach_options.go @@ -6,7 +6,7 @@ import ( "io" "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_exists_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_exists_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_exists_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_exists_options.go index 6c73fcc65..91a7ceec5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_exists_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_exists_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_export_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_export_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_export_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_export_options.go index 041a18041..81c94f2b3 100644 --- 
a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_export_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_export_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_healthcheck_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_healthcheck_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_healthcheck_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_healthcheck_options.go index f7daafdab..e346d4822 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_healthcheck_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_healthcheck_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_init_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_init_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_init_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_init_options.go index f93422ee3..2f06ae25f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_init_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_init_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_inspect_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_inspect_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_inspect_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_inspect_options.go index 0fa8d0917..29fa6fedd 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_inspect_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_inspect_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_kill_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_kill_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_kill_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_kill_options.go index af26b3416..8cb985528 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_kill_options.go +++ 
b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_kill_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_list_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_list_options.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_list_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_list_options.go index 0204423eb..c6a25908e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_list_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_list_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_log_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_log_options.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_log_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_log_options.go index 4aab596d8..f6efbaa9b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_log_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_log_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mount_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_mount_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mount_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_mount_options.go index 1e0b7ddbf..332f01809 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mount_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_mount_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mountedcontainerpaths_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_mountedcontainerpaths_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mountedcontainerpaths_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_mountedcontainerpaths_options.go index 62377b52b..8c5494fac 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_mountedcontainerpaths_options.go +++ 
b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_mountedcontainerpaths_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_pause_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_pause_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_pause_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_pause_options.go index 26ee31db0..14b18a87f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_pause_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_pause_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_prune_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_prune_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_prune_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_prune_options.go index 413b84f47..69d619d16 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_prune_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_prune_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_remove_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_remove_options.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_remove_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_remove_options.go index b73b01cd2..e7da10a0f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_remove_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_remove_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_rename_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_rename_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_rename_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_rename_options.go index 1957a3982..572581213 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_rename_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_rename_options.go @@ -4,7 +4,7 @@ 
package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizeexectty_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_resizeexectty_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizeexectty_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_resizeexectty_options.go index 2a7d5b540..0741f4eae 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizeexectty_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_resizeexectty_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizetty_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_resizetty_options.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizetty_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_resizetty_options.go index fc027c48d..c336f6fe8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_resizetty_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_resizetty_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restart_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_restart_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restart_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_restart_options.go index f5f20df51..e27e881ad 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restart_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_restart_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restore_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_restore_options.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restore_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_restore_options.go index b1b14a704..cb4e30760 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_restore_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_restore_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - 
"github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_shouldrestart_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_shouldrestart_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_shouldrestart_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_shouldrestart_options.go index e9d529355..e968731b0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_shouldrestart_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_shouldrestart_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_start_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_start_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_start_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_start_options.go index 88342dcd0..fb77b421c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_start_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_start_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stats_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_stats_options.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stats_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_stats_options.go index 968f824d5..fbae6bd49 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stats_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_stats_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stop_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_stop_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stop_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_stop_options.go index 375557ecb..eea280e3f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stop_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_stop_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + 
"github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_top_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_top_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_top_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_top_options.go index 61d37ed0d..cfd21ecc3 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_top_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_top_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unmount_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_unmount_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unmount_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_unmount_options.go index 0faa405c4..0752556a0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unmount_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_unmount_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unpause_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_unpause_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unpause_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_unpause_options.go index 4a967862e..2f91a7d33 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_unpause_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_unpause_options.go @@ -4,7 +4,7 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_wait_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_wait_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_wait_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_wait_options.go index 260a73d6f..fcb9a9140 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_wait_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/types_wait_options.go @@ -4,8 +4,8 @@ package containers import ( "net/url" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/libpod/define" + 
"github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/update.go b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/update.go similarity index 74% rename from vendor/github.com/containers/podman/v4/pkg/bindings/containers/update.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/containers/update.go index 7cda7c306..20b743c95 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/update.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/containers/update.go @@ -5,12 +5,12 @@ import ( "net/http" "strings" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/types" jsoniter "github.com/json-iterator/go" ) -func Update(ctx context.Context, options *entities.ContainerUpdateOptions) (string, error) { +func Update(ctx context.Context, options *types.ContainerUpdateOptions) (string, error) { conn, err := bindings.GetClient(ctx) if err != nil { return "", err diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/doc.go b/vendor/github.com/containers/podman/v5/pkg/bindings/doc.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/bindings/doc.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/doc.go diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/errors.go b/vendor/github.com/containers/podman/v5/pkg/bindings/errors.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/bindings/errors.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/errors.go index a7cbeb30a..af506a0f3 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/errors.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/errors.go @@ -7,7 +7,7 @@ import ( "io" "github.com/blang/semver/v4" - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/errorhandling" ) var ( @@ -71,7 +71,7 @@ type APIVersionError struct { // NewAPIVersionError create bindings error when the endpoint on the server is not supported // because the version is to old. -// - endpoint is the name fo the endpoint (e.g. /containers/json) +// - endpoint is the name for the endpoint (e.g. 
/containers/json) // - version is the server API version // - requiredVersion is the server version need to use said endpoint func NewAPIVersionError(endpoint string, version *semver.Version, requiredVersion string) *APIVersionError { diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/generate.go b/vendor/github.com/containers/podman/v5/pkg/bindings/generate/generate.go similarity index 82% rename from vendor/github.com/containers/podman/v4/pkg/bindings/generate/generate.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/generate/generate.go index 810aab2b3..59a5cd4cb 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/generate.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/generate/generate.go @@ -6,11 +6,11 @@ import ( "net/http" "strconv" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/types" ) -func Systemd(ctx context.Context, nameOrID string, options *SystemdOptions) (*entities.GenerateSystemdReport, error) { +func Systemd(ctx context.Context, nameOrID string, options *SystemdOptions) (*types.GenerateSystemdReport, error) { if options == nil { options = new(SystemdOptions) } @@ -29,14 +29,14 @@ func Systemd(ctx context.Context, nameOrID string, options *SystemdOptions) (*en } defer response.Body.Close() - report := &entities.GenerateSystemdReport{} + report := &types.GenerateSystemdReport{} return report, response.Process(&report.Units) } // Kube generate Kubernetes YAML (v1 specification) // // Note: Caller is responsible for closing returned reader -func Kube(ctx context.Context, nameOrIDs []string, options *KubeOptions) (*entities.GenerateKubeReport, error) { +func Kube(ctx context.Context, nameOrIDs []string, options *KubeOptions) (*types.GenerateKubeReport, error) { if options == nil { options = new(KubeOptions) } @@ -64,7 +64,7 @@ func Kube(ctx context.Context, nameOrIDs []string, options *KubeOptions) (*entit } if response.StatusCode == http.StatusOK { - return &entities.GenerateKubeReport{Reader: response.Body}, nil + return &types.GenerateKubeReport{Reader: response.Body}, nil } // Unpack the error. 
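The generate.Kube doc comment above warns that the caller is responsible for closing the returned reader, which is easy to miss. A minimal consumer under the new v5 import paths might look like the sketch below; the socket URI and the pod name "demo" are assumptions, not part of this diff.

    package main

    import (
        "context"
        "io"
        "log"
        "os"

        "github.com/containers/podman/v5/pkg/bindings"
        "github.com/containers/podman/v5/pkg/bindings/generate"
    )

    func main() {
        // Connect to the Podman API socket; the path is an assumption, and
        // rootless setups usually expose $XDG_RUNTIME_DIR/podman/podman.sock.
        ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
        if err != nil {
            log.Fatal(err)
        }

        // Ask the service for Kubernetes YAML describing the pod "demo".
        report, err := generate.Kube(ctx, []string{"demo"}, nil)
        if err != nil {
            log.Fatal(err)
        }
        // Per the doc comment, the caller owns the reader and must close it; the
        // type assertion works whether Reader is an io.Reader or an io.ReadCloser.
        if closer, ok := report.Reader.(io.Closer); ok {
            defer closer.Close()
        }

        if _, err := io.Copy(os.Stdout, report.Reader); err != nil {
            log.Fatal(err)
        }
    }

Passing nil options is fine here because Kube allocates a default KubeOptions when given nil, as the hunk above shows.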
diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/generate/types.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/bindings/generate/types.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/generate/types.go diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types_kube_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/generate/types_kube_options.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/bindings/generate/types_kube_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/generate/types_kube_options.go index b01594957..4770a7b61 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types_kube_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/generate/types_kube_options.go @@ -4,7 +4,7 @@ package generate import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types_systemd_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/generate/types_systemd_options.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/bindings/generate/types_systemd_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/generate/types_systemd_options.go index 3aec33a54..2069e39ff 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types_systemd_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/generate/types_systemd_options.go @@ -4,7 +4,7 @@ package generate import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/build.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/build.go index b286b1a49..1d9f92e79 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/build.go @@ -2,7 +2,6 @@ package images import ( "archive/tar" - "compress/gzip" "context" "encoding/json" "errors" @@ -18,12 +17,12 @@ import ( "strings" "github.com/containers/buildah/define" - "github.com/containers/image/v5/types" - ldefine "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/auth" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/util" + imageTypes "github.com/containers/image/v5/types" + ldefine "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/auth" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/regexp" @@ -31,6 +30,7 @@ import ( "github.com/docker/go-units" "github.com/hashicorp/go-multierror" 
jsoniter "github.com/json-iterator/go" + gzip "github.com/klauspost/pgzip" "github.com/sirupsen/logrus" ) @@ -50,7 +50,7 @@ type BuildResponse struct { } // Build creates an image using a containerfile reference -func Build(ctx context.Context, containerFiles []string, options entities.BuildOptions) (*entities.BuildReport, error) { +func Build(ctx context.Context, containerFiles []string, options types.BuildOptions) (*types.BuildReport, error) { if options.CommonBuildOpts == nil { options.CommonBuildOpts = new(define.CommonBuildOptions) } @@ -255,9 +255,9 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO } switch options.SkipUnusedStages { - case types.OptionalBoolTrue: + case imageTypes.OptionalBoolTrue: params.Set("skipunusedstages", "1") - case types.OptionalBoolFalse: + case imageTypes.OptionalBoolFalse: params.Set("skipunusedstages", "0") } @@ -342,9 +342,9 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO params.Set("pullpolicy", options.PullPolicy.String()) switch options.CommonBuildOpts.IdentityLabel { - case types.OptionalBoolTrue: + case imageTypes.OptionalBoolTrue: params.Set("identitylabel", "1") - case types.OptionalBoolFalse: + case imageTypes.OptionalBoolFalse: params.Set("identitylabel", "0") } if options.Quiet { @@ -416,7 +416,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO } else { headers, err = auth.MakeXRegistryConfigHeader(options.SystemContext, "", "") } - if options.SystemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue { + if options.SystemContext.DockerInsecureSkipTLSVerify == imageTypes.OptionalBoolTrue { params.Set("tlsVerify", "false") } } @@ -530,9 +530,9 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO if len(secretOpt) > 0 { modifiedOpt := []string{} for _, token := range secretOpt { - arr := strings.SplitN(token, "=", 2) - if len(arr) > 1 { - if arr[0] == "src" { + opt, val, hasVal := strings.Cut(token, "=") + if hasVal { + if opt == "src" { // read specified secret into a tmp file // move tmp file to tar and change secret source to relative tmp file tmpSecretFile, err := os.CreateTemp(options.ContextDirectory, "podman-build-secret") @@ -541,7 +541,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO } defer os.Remove(tmpSecretFile.Name()) // clean up defer tmpSecretFile.Close() - srcSecretFile, err := os.Open(arr[1]) + srcSecretFile, err := os.Open(val) if err != nil { return nil, err } @@ -618,7 +618,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO // even when the server quit but it seems desirable to // distinguish a proper build from a transient EOF. case <-response.Request.Context().Done(): - return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, nil + return &types.BuildReport{ID: id, SaveFormat: saveFormat}, nil default: // non-blocking select } @@ -632,7 +632,7 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO if errors.Is(err, io.EOF) && id != "" { break } - return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, fmt.Errorf("decoding stream: %w", err) + return &types.BuildReport{ID: id, SaveFormat: saveFormat}, fmt.Errorf("decoding stream: %w", err) } switch { @@ -645,12 +645,12 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO case s.Error != nil: // If there's an error, return directly. The stream // will be closed on return. 
- return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, errors.New(s.Error.Message) + return &types.BuildReport{ID: id, SaveFormat: saveFormat}, errors.New(s.Error.Message) default: - return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, errors.New("failed to parse build results stream, unexpected input") + return &types.BuildReport{ID: id, SaveFormat: saveFormat}, errors.New("failed to parse build results stream, unexpected input") } } - return &entities.BuildReport{ID: id, SaveFormat: saveFormat}, nil + return &types.BuildReport{ID: id, SaveFormat: saveFormat}, nil } func nTar(excludes []string, sources ...string) (io.ReadCloser, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_unix.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/build_unix.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/build_unix.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/build_unix.go index 07bb8cbcd..fbdfa5df2 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_unix.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/build_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package images diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build_windows.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/build_windows.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/build_windows.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/build_windows.go diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/diff.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/diff.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/diff.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/diff.go index 2a59e6d69..b4eaac8a0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/diff.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/diff.go @@ -4,7 +4,7 @@ import ( "context" "net/http" - "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings" "github.com/containers/storage/pkg/archive" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/images.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/images.go similarity index 89% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/images.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/images.go index ef76bb9df..018edbc97 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/images.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/images.go @@ -10,11 +10,11 @@ import ( "strconv" imageTypes "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/pkg/api/handlers/types" - "github.com/containers/podman/v4/pkg/auth" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/entities/reports" + handlersTypes "github.com/containers/podman/v5/pkg/api/handlers/types" + "github.com/containers/podman/v5/pkg/auth" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/domain/entities/types" ) // Exists a lightweight way to determine 
if an image exists in local storage. It returns a @@ -35,11 +35,11 @@ func Exists(ctx context.Context, nameOrID string, options *ExistsOptions) (bool, // List returns a list of images in local storage. The all boolean and filters parameters are optional // ways to alter the image query. -func List(ctx context.Context, options *ListOptions) ([]*entities.ImageSummary, error) { +func List(ctx context.Context, options *ListOptions) ([]*types.ImageSummary, error) { if options == nil { options = new(ListOptions) } - var imageSummary []*entities.ImageSummary + var imageSummary []*types.ImageSummary conn, err := bindings.GetClient(ctx) if err != nil { return nil, err @@ -59,7 +59,7 @@ func List(ctx context.Context, options *ListOptions) ([]*entities.ImageSummary, // Get performs an image inspect. To have the on-disk size of the image calculated, you can // use the optional size parameter. -func GetImage(ctx context.Context, nameOrID string, options *GetOptions) (*entities.ImageInspectReport, error) { +func GetImage(ctx context.Context, nameOrID string, options *GetOptions) (*types.ImageInspectReport, error) { if options == nil { options = new(GetOptions) } @@ -71,7 +71,7 @@ func GetImage(ctx context.Context, nameOrID string, options *GetOptions) (*entit if err != nil { return nil, err } - inspectedData := entities.ImageInspectReport{} + inspectedData := types.ImageInspectReport{} response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/images/%s/json", params, nil, nameOrID) if err != nil { return &inspectedData, err @@ -82,11 +82,11 @@ func GetImage(ctx context.Context, nameOrID string, options *GetOptions) (*entit } // Tree retrieves a "tree" based representation of the given image -func Tree(ctx context.Context, nameOrID string, options *TreeOptions) (*entities.ImageTreeReport, error) { +func Tree(ctx context.Context, nameOrID string, options *TreeOptions) (*types.ImageTreeReport, error) { if options == nil { options = new(TreeOptions) } - var report entities.ImageTreeReport + var report types.ImageTreeReport conn, err := bindings.GetClient(ctx) if err != nil { return nil, err @@ -105,12 +105,12 @@ func Tree(ctx context.Context, nameOrID string, options *TreeOptions) (*entities } // History returns the parent layers of an image. -func History(ctx context.Context, nameOrID string, options *HistoryOptions) ([]*types.HistoryResponse, error) { +func History(ctx context.Context, nameOrID string, options *HistoryOptions) ([]*handlersTypes.HistoryResponse, error) { if options == nil { options = new(HistoryOptions) } _ = options - var history []*types.HistoryResponse + var history []*handlersTypes.HistoryResponse conn, err := bindings.GetClient(ctx) if err != nil { return nil, err @@ -124,8 +124,8 @@ func History(ctx context.Context, nameOrID string, options *HistoryOptions) ([]* return history, response.Process(&history) } -func Load(ctx context.Context, r io.Reader) (*entities.ImageLoadReport, error) { - var report entities.ImageLoadReport +func Load(ctx context.Context, r io.Reader) (*types.ImageLoadReport, error) { + var report types.ImageLoadReport conn, err := bindings.GetClient(ctx) if err != nil { return nil, err @@ -242,11 +242,11 @@ func Untag(ctx context.Context, nameOrID, tag, repo string, options *UntagOption // Import adds the given image to the local image store. This can be done by file and the given reader // or via the url parameter. Additional metadata can be associated with the image by using the changes and // message parameters. The image can also be tagged given a reference. 
One of url OR r must be provided. -func Import(ctx context.Context, r io.Reader, options *ImportOptions) (*entities.ImageImportReport, error) { +func Import(ctx context.Context, r io.Reader, options *ImportOptions) (*types.ImageImportReport, error) { if options == nil { options = new(ImportOptions) } - var report entities.ImageImportReport + var report types.ImageImportReport if r != nil && options.URL != nil { return nil, errors.New("url and r parameters cannot be used together") } @@ -268,7 +268,7 @@ func Import(ctx context.Context, r io.Reader, options *ImportOptions) (*entities } // Search is the binding for libpod's v2 endpoints for Search images. -func Search(ctx context.Context, term string, options *SearchOptions) ([]entities.ImageSearchReport, error) { +func Search(ctx context.Context, term string, options *SearchOptions) ([]types.ImageSearchReport, error) { if options == nil { options = new(SearchOptions) } @@ -299,7 +299,7 @@ func Search(ctx context.Context, term string, options *SearchOptions) ([]entitie } defer response.Body.Close() - results := []entities.ImageSearchReport{} + results := []types.ImageSearchReport{} if err := response.Process(&results); err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/pull.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/pull.go similarity index 83% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/pull.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/pull.go index 43592f6e2..45534a212 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/pull.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/pull.go @@ -10,11 +10,11 @@ import ( "os" "strconv" - "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/pkg/auth" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/errorhandling" + imgTypes "github.com/containers/image/v5/types" + "github.com/containers/podman/v5/pkg/auth" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/errorhandling" ) // Pull is the binding for libpod's v2 endpoints for pulling images. 
Note that @@ -41,7 +41,7 @@ func Pull(ctx context.Context, rawImage string, options *PullOptions) ([]string, params.Set("tlsVerify", strconv.FormatBool(!options.GetSkipTLSVerify())) } - header, err := auth.MakeXRegistryAuthHeader(&types.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword()) + header, err := auth.MakeXRegistryAuthHeader(&imgTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword()) if err != nil { return nil, err } @@ -71,7 +71,7 @@ func Pull(ctx context.Context, rawImage string, options *PullOptions) ([]string, var pullErrors []error LOOP: for { - var report entities.ImagePullReport + var report types.ImagePullReport if err := dec.Decode(&report); err != nil { if errors.Is(err, io.EOF) { break diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/push.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/push.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/push.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/push.go index ea1d96823..d6769786f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/push.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/push.go @@ -11,9 +11,9 @@ import ( "strconv" imageTypes "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/pkg/auth" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/auth" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/types" ) // Push is the binding for libpod's endpoints for push images. Note that @@ -69,7 +69,7 @@ func Push(ctx context.Context, source string, destination string, options *PushO dec := json.NewDecoder(response.Body) LOOP: for { - var report entities.ImagePushStream + var report types.ImagePushStream if err := dec.Decode(&report); err != nil { if errors.Is(err, io.EOF) { break diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/rm.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/rm.go similarity index 72% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/rm.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/rm.go index eb3eef10c..4fd35c555 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/rm.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/rm.go @@ -4,19 +4,19 @@ import ( "context" "net/http" - "github.com/containers/podman/v4/pkg/api/handlers/types" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/errorhandling" + handlersTypes "github.com/containers/podman/v5/pkg/api/handlers/types" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/errorhandling" ) // Remove removes one or more images from the local storage. Use optional force option to remove an // image, even if it's used by containers. 
-func Remove(ctx context.Context, images []string, options *RemoveOptions) (*entities.ImageRemoveReport, []error) { +func Remove(ctx context.Context, images []string, options *RemoveOptions) (*types.ImageRemoveReport, []error) { if options == nil { options = new(RemoveOptions) } - var report types.LibpodImagesRemoveReport + var report handlersTypes.LibpodImagesRemoveReport conn, err := bindings.GetClient(ctx) if err != nil { return nil, []error{err} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types.go index e5c58df00..7621f8cd7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types.go @@ -3,7 +3,7 @@ package images import ( "io" - buildahDefine "github.com/containers/buildah/define" + "github.com/containers/podman/v5/pkg/domain/entities/types" ) // RemoveOptions are optional options for image removal @@ -162,6 +162,10 @@ type PushOptions struct { SkipTLSVerify *bool `schema:"-"` // RemoveSignatures Discard any pre-existing signatures in the image. RemoveSignatures *bool + // Retry number of times to retry push in case of failure + Retry *uint + // RetryDelay between retries in case of push failures + RetryDelay *string // Username for authenticating against the registry. Username *string `schema:"-"` // Quiet can be specified to suppress progress when pushing. @@ -217,6 +221,10 @@ type PullOptions struct { // Quiet can be specified to suppress pull progress when pulling. Ignored // for remote calls. Quiet *bool + // Retry number of times to retry pull in case of failure + Retry *uint + // RetryDelay between retries in case of pull failures + RetryDelay *string // SkipTLSVerify to skip HTTPS and certificate verification. SkipTLSVerify *bool `schema:"-"` // Username for authenticating against the registry. 
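The Retry and RetryDelay fields added to PushOptions and PullOptions above come with generated WithRetry/WithRetryDelay accessors later in this diff (see types_pull_options.go and types_push_options.go below). A hypothetical usage sketch, assuming a default socket URI and an arbitrary image reference chosen for illustration:

```go
// Hypothetical usage of the new retry knobs, not part of this diff: the
// socket URI and image reference are assumptions for illustration.
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v5/pkg/bindings"
	"github.com/containers/podman/v5/pkg/bindings/images"
)

func main() {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	// Ask the service to retry a failed pull up to 3 times, waiting 2s
	// between attempts; RetryDelay is sent as a duration string.
	opts := new(images.PullOptions).
		WithRetry(3).
		WithRetryDelay("2s").
		WithQuiet(true)
	ids, err := images.Pull(ctx, "quay.io/libpod/alpine:latest", opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("pulled image IDs:", ids)
}
```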
@@ -226,9 +234,7 @@ type PullOptions struct { } // BuildOptions are optional options for building images -type BuildOptions struct { - buildahDefine.BuildOptions -} +type BuildOptions = types.BuildOptions // ExistsOptions are optional options for checking if an image exists // diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_diff_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_diff_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_diff_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_diff_options.go index 3a3032155..18bcf9526 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_diff_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_diff_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_exists_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_exists_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_exists_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_exists_options.go index 7521d85f3..e31138d43 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_exists_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_exists_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_export_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_export_options.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_export_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_export_options.go index 77850734c..0cc3da8c8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_export_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_export_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_get_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_get_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_get_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_get_options.go index 36b62272e..84295a75f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_get_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_get_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if 
named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_history_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_history_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_history_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_history_options.go index a9abb482a..3dc43cc42 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_history_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_history_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_import_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_import_options.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_import_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_import_options.go index f958fe8b4..aafb8de70 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_import_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_import_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_list_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_list_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_list_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_list_options.go index f47cd9c75..4c378e1e8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_list_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_list_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_load_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_load_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_load_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_load_options.go index 9978201ce..470e4293b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_load_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_load_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_prune_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_prune_options.go similarity index 95% rename from 
vendor/github.com/containers/podman/v4/pkg/bindings/images/types_prune_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_prune_options.go index eddf1ae45..ead74e7ea 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_prune_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_prune_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_pull_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_pull_options.go similarity index 84% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_pull_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_pull_options.go index c1a88fd9e..7a10a5c39 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_pull_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_pull_options.go @@ -5,7 +5,7 @@ import ( "io" "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set @@ -138,6 +138,36 @@ func (o *PullOptions) GetQuiet() bool { return *o.Quiet } +// WithRetry set field Retry to given value +func (o *PullOptions) WithRetry(value uint) *PullOptions { + o.Retry = &value + return o +} + +// GetRetry returns value of field Retry +func (o *PullOptions) GetRetry() uint { + if o.Retry == nil { + var z uint + return z + } + return *o.Retry +} + +// WithRetryDelay set field RetryDelay to given value +func (o *PullOptions) WithRetryDelay(value string) *PullOptions { + o.RetryDelay = &value + return o +} + +// GetRetryDelay returns value of field RetryDelay +func (o *PullOptions) GetRetryDelay() string { + if o.RetryDelay == nil { + var z string + return z + } + return *o.RetryDelay +} + // WithSkipTLSVerify set field SkipTLSVerify to given value func (o *PullOptions) WithSkipTLSVerify(value bool) *PullOptions { o.SkipTLSVerify = &value diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_push_options.go similarity index 89% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_push_options.go index 770ffffd1..686636cdb 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_push_options.go @@ -5,7 +5,7 @@ import ( "io" "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set @@ -198,6 +198,36 @@ func (o *PushOptions) GetRemoveSignatures() bool { return *o.RemoveSignatures } +// WithRetry set field Retry to given value +func (o *PushOptions) WithRetry(value uint) *PushOptions { + o.Retry = &value + return o +} + +// GetRetry returns value of field Retry +func (o *PushOptions) GetRetry() uint { + if o.Retry == nil { + var z uint + return z + } + return *o.Retry +} + +// WithRetryDelay set field RetryDelay to given 
value +func (o *PushOptions) WithRetryDelay(value string) *PushOptions { + o.RetryDelay = &value + return o +} + +// GetRetryDelay returns value of field RetryDelay +func (o *PushOptions) GetRetryDelay() string { + if o.RetryDelay == nil { + var z string + return z + } + return *o.RetryDelay +} + // WithUsername set field Username to given value func (o *PushOptions) WithUsername(value string) *PushOptions { o.Username = &value diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_remove_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_remove_options.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_remove_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_remove_options.go index 8972ac93c..68d24149c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_remove_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_remove_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_scp_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_scp_options.go similarity index 74% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_scp_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_scp_options.go index 5a1178cb1..e92b3dc03 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_scp_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_scp_options.go @@ -3,7 +3,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // ToParams formats struct fields to be passed to API service diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_search_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_search_options.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_search_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_search_options.go index cc28da57f..7da357ba5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_search_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_search_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tag_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_tag_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tag_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_tag_options.go index 30cef4dd2..39582afd6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tag_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_tag_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" 
- "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tree_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_tree_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tree_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_tree_options.go index 57eecf959..f87517cb0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_tree_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_tree_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_untag_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_untag_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/images/types_untag_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/images/types_untag_options.go index eba687f3d..4fdb689ad 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_untag_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/images/types_untag_options.go @@ -4,7 +4,7 @@ package images import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/internal/util/util.go b/vendor/github.com/containers/podman/v5/pkg/bindings/internal/util/util.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/bindings/internal/util/util.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/internal/util/util.go diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/kube.go b/vendor/github.com/containers/podman/v5/pkg/bindings/kube/kube.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/kube/kube.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/kube/kube.go index fefbe1a2f..c8e4bb664 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/kube.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/kube/kube.go @@ -9,14 +9,14 @@ import ( "strconv" "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/pkg/auth" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/bindings/generate" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/auth" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings/generate" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" "github.com/sirupsen/logrus" ) -func Play(ctx context.Context, path string, options *PlayOptions) (*entities.KubePlayReport, error) { +func Play(ctx context.Context, path string, options *PlayOptions) (*entitiesTypes.KubePlayReport, error) { f, err := os.Open(path) if err != nil { return nil, err @@ -26,8 +26,8 @@ func Play(ctx context.Context, path string, options 
*PlayOptions) (*entities.Kub return PlayWithBody(ctx, f, options) } -func PlayWithBody(ctx context.Context, body io.Reader, options *PlayOptions) (*entities.KubePlayReport, error) { - var report entities.KubePlayReport +func PlayWithBody(ctx context.Context, body io.Reader, options *PlayOptions) (*entitiesTypes.KubePlayReport, error) { + var report entitiesTypes.KubePlayReport if options == nil { options = new(PlayOptions) } @@ -88,7 +88,7 @@ func PlayWithBody(ctx context.Context, body io.Reader, options *PlayOptions) (*e return &report, nil } -func Down(ctx context.Context, path string, options DownOptions) (*entities.KubePlayReport, error) { +func Down(ctx context.Context, path string, options DownOptions) (*entitiesTypes.KubePlayReport, error) { f, err := os.Open(path) if err != nil { return nil, err @@ -102,8 +102,8 @@ func Down(ctx context.Context, path string, options DownOptions) (*entities.Kube return DownWithBody(ctx, f, options) } -func DownWithBody(ctx context.Context, body io.Reader, options DownOptions) (*entities.KubePlayReport, error) { - var report entities.KubePlayReport +func DownWithBody(ctx context.Context, body io.Reader, options DownOptions) (*entitiesTypes.KubePlayReport, error) { + var report entitiesTypes.KubePlayReport conn, err := bindings.GetClient(ctx) if err != nil { return nil, err @@ -125,7 +125,7 @@ func DownWithBody(ctx context.Context, body io.Reader, options DownOptions) (*en } // Kube generate Kubernetes YAML (v1 specification) -func Generate(ctx context.Context, nameOrIDs []string, options generate.KubeOptions) (*entities.GenerateKubeReport, error) { +func Generate(ctx context.Context, nameOrIDs []string, options generate.KubeOptions) (*entitiesTypes.GenerateKubeReport, error) { return generate.Kube(ctx, nameOrIDs, &options) } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/kube/types.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/bindings/kube/types.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/kube/types.go diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_apply_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/kube/types_apply_options.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_apply_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/kube/types_apply_options.go index 9fb49594d..9ec4a8215 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_apply_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/kube/types_apply_options.go @@ -4,7 +4,7 @@ package kube import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_down_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/kube/types_down_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_down_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/kube/types_down_options.go index 99ce3abe4..58b85de0d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_down_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/kube/types_down_options.go @@ -4,7 +4,7 @@ package kube import ( 
"net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_play_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/kube/types_play_options.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_play_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/kube/types_play_options.go index 6082799a6..f9e160e14 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_play_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/kube/types_play_options.go @@ -5,7 +5,7 @@ import ( "net" "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/manifests.go b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/manifests.go similarity index 69% rename from vendor/github.com/containers/podman/v4/pkg/bindings/manifests/manifests.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/manifests/manifests.go index ec3affce0..c77dceaf1 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/manifests.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/manifests.go @@ -6,20 +6,26 @@ import ( "errors" "fmt" "io" + "mime/multipart" "net/http" + "net/textproto" "os" + "path/filepath" "strconv" "strings" + "sync" "github.com/containers/common/libimage/define" "github.com/containers/image/v5/manifest" imageTypes "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/pkg/auth" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/bindings/images" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/auth" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings/images" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/errorhandling" + dockerAPI "github.com/docker/docker/api/types" jsoniter "github.com/json-iterator/go" + "golang.org/x/exp/slices" ) // Create creates a manifest for the given name. Optional images to be associated with @@ -27,7 +33,7 @@ import ( // of a list if the name provided is a manifest list. The ID of the new manifest list // is returned as a string. func Create(ctx context.Context, name string, images []string, options *CreateOptions) (string, error) { - var idr entities.IDResponse + var idr dockerAPI.IDResponse if options == nil { options = new(CreateOptions) } @@ -159,7 +165,7 @@ func Add(ctx context.Context, name string, options *AddOptions) (string, error) Features: options.Features, Images: options.Images, OS: options.OS, - OSFeatures: nil, + OSFeatures: options.OSFeatures, OSVersion: options.OSVersion, Variant: options.Variant, Username: options.Username, @@ -171,6 +177,37 @@ func Add(ctx context.Context, name string, options *AddOptions) (string, error) return Modify(ctx, name, options.Images, &optionsv4) } +// AddArtifact creates an artifact manifest and adds it to a given manifest +// list. Additional options for the manifest can also be specified. 
The ID of +// the new manifest list is returned as a string +func AddArtifact(ctx context.Context, name string, options *AddArtifactOptions) (string, error) { + if options == nil { + options = new(AddArtifactOptions) + } + optionsv4 := ModifyOptions{ + Annotations: options.Annotation, + Arch: options.Arch, + Features: options.Features, + OS: options.OS, + OSFeatures: options.OSFeatures, + OSVersion: options.OSVersion, + Variant: options.Variant, + + ArtifactType: options.Type, + ArtifactConfigType: options.ConfigType, + ArtifactLayerType: options.LayerType, + ArtifactConfig: options.Config, + ArtifactExcludeTitles: options.ExcludeTitles, + ArtifactSubject: options.Subject, + ArtifactAnnotations: options.Annotations, + } + if len(options.Files) > 0 { + optionsv4.WithArtifactFiles(options.Files) + } + optionsv4.WithOperation("update") + return Modify(ctx, name, nil, &optionsv4) +} + // Remove deletes a manifest entry from a manifest list. Both name and the digest to be // removed are mandatory inputs. The ID of the new manifest list is returned as a string. func Remove(ctx context.Context, name, digest string, _ *RemoveOptions) (string, error) { @@ -179,8 +216,8 @@ func Remove(ctx context.Context, name, digest string, _ *RemoveOptions) (string, } // Delete removes specified manifest from local storage. -func Delete(ctx context.Context, name string) (*entities.ManifestRemoveReport, error) { - var report entities.ManifestRemoveReport +func Delete(ctx context.Context, name string) (*entitiesTypes.ManifestRemoveReport, error) { + var report entitiesTypes.ManifestRemoveReport conn, err := bindings.GetClient(ctx) if err != nil { return nil, err @@ -250,7 +287,7 @@ func Push(ctx context.Context, name, destination string, options *images.PushOpt dec := json.NewDecoder(response.Body) for { - var report entities.ManifestPushReport + var report entitiesTypes.ManifestPushReport if err := dec.Decode(&report); err != nil { return "", err } @@ -283,6 +320,16 @@ func Modify(ctx context.Context, name string, images []string, options *ModifyOp } options.WithImages(images) + var artifactFiles, artifactBaseNames []string + if options.ArtifactFiles != nil && len(*options.ArtifactFiles) > 0 { + artifactFiles = slices.Clone(*options.ArtifactFiles) + artifactBaseNames = make([]string, 0, len(artifactFiles)) + for _, filename := range artifactFiles { + artifactBaseNames = append(artifactBaseNames, filepath.Base(filename)) + } + options.ArtifactFiles = &artifactBaseNames + } + conn, err := bindings.GetClient(ctx) if err != nil { return "", err @@ -291,12 +338,81 @@ func Modify(ctx context.Context, name string, images []string, options *ModifyOp if err != nil { return "", err } - reader := strings.NewReader(opts) + reader := io.Reader(strings.NewReader(opts)) + if options.Body != nil { + reader = io.MultiReader(reader, *options.Body) + } + var artifactContentType string + var artifactWriterGroup sync.WaitGroup + var artifactWriterError error + if len(artifactFiles) > 0 { + // get ready to upload the passed-in files + bodyReader, bodyWriter := io.Pipe() + defer bodyReader.Close() + requestBodyReader := reader + reader = bodyReader + // upload the files in another goroutine + writer := multipart.NewWriter(bodyWriter) + artifactContentType = writer.FormDataContentType() + artifactWriterGroup.Add(1) + go func() { + defer bodyWriter.Close() + defer writer.Close() + // start with the body we would have uploaded if we weren't + // attaching artifacts + headers := textproto.MIMEHeader{ + "Content-Type": 
[]string{"application/json"}, + } + requestPartWriter, err := writer.CreatePart(headers) + if err != nil { + artifactWriterError = fmt.Errorf("creating form part for request: %v", err) + return + } + if _, err := io.Copy(requestPartWriter, requestBodyReader); err != nil { + artifactWriterError = fmt.Errorf("uploading request as form part: %v", err) + return + } + // now walk the list of files we're attaching + for _, file := range artifactFiles { + if err := func() error { + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + fileBase := filepath.Base(file) + formFile, err := writer.CreateFormFile(fileBase, fileBase) + if err != nil { + return err + } + st, err := f.Stat() + if err != nil { + return err + } + // upload the file contents + n, err := io.Copy(formFile, f) + if err != nil { + return fmt.Errorf("uploading contents of artifact file %s: %w", filepath.Base(file), err) + } + if n != st.Size() { + return fmt.Errorf("short write while uploading contents of artifact file %s: %d != %d", filepath.Base(file), n, st.Size()) + } + return nil + }(); err != nil { + artifactWriterError = err + break + } + } + }() + } header, err := auth.MakeXRegistryAuthHeader(&imageTypes.SystemContext{AuthFilePath: options.GetAuthfile()}, options.GetUsername(), options.GetPassword()) if err != nil { return "", err } + if artifactContentType != "" { + header["Content-Type"] = []string{artifactContentType} + } params, err := options.ToParams() if err != nil { @@ -314,13 +430,18 @@ } defer response.Body.Close() + artifactWriterGroup.Wait() + if artifactWriterError != nil { + return "", fmt.Errorf("uploading artifacts: %w", artifactWriterError) + } + data, err := io.ReadAll(response.Body) if err != nil { return "", fmt.Errorf("unable to process API response: %w", err) } if response.IsSuccess() || response.IsRedirection() { - var report entities.ManifestModifyReport + var report entitiesTypes.ManifestModifyReport if err = jsoniter.Unmarshal(data, &report); err != nil { return "", fmt.Errorf("unable to decode API response: %w", err) } diff --git a/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types.go new file mode 100644 index 000000000..aae36c9a6 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types.go @@ -0,0 +1,113 @@ +package manifests + +import "io" + +// InspectOptions are optional options for inspecting manifests + +//go:generate go run ../generator/generator.go InspectOptions +type InspectOptions struct { + // Authfile - path to an authentication file. + // SkipTLSVerify - skip https and certificate validation when + // contacting container registries.
+ SkipTLSVerify *bool +} + +// CreateOptions are optional options for creating manifests +// +//go:generate go run ../generator/generator.go CreateOptions +type CreateOptions struct { + All *bool + Amend *bool + Annotation map[string]string +} + +// ExistsOptions are optional options for checking +// if a manifest list exists +// +//go:generate go run ../generator/generator.go ExistsOptions +type ExistsOptions struct { +} + +// AddOptions are optional options for adding manifest lists +// +//go:generate go run ../generator/generator.go AddOptions +type AddOptions struct { + All *bool + + Annotation map[string]string + Arch *string + Features []string + OS *string + OSVersion *string + OSFeatures []string + Variant *string + + Images []string + Authfile *string + Password *string + Username *string + SkipTLSVerify *bool `schema:"-"` +} + +// AddArtifactOptions are optional options for adding artifact manifests +// +//go:generate go run ../generator/generator.go AddArtifactOptions +type AddArtifactOptions struct { + Annotation map[string]string + Arch *string + Features []string + OS *string + OSVersion *string + OSFeatures []string + Variant *string + + Type **string `json:"artifact_type,omitempty"` + ConfigType *string `json:"artifact_config_type,omitempty"` + Config *string `json:"artifact_config,omitempty"` + LayerType *string `json:"artifact_layer_type,omitempty"` + ExcludeTitles *bool `json:"artifact_exclude_titles,omitempty"` + Subject *string `json:"artifact_subject,omitempty"` + Annotations map[string]string `json:"artifact_annotations,omitempty"` + Files []string `json:"artifact_files,omitempty"` +} + +// RemoveOptions are optional options for removing manifest lists +// +//go:generate go run ../generator/generator.go RemoveOptions +type RemoveOptions struct { +} + +// ModifyOptions are optional options for modifying manifest lists +// +//go:generate go run ../generator/generator.go ModifyOptions +type ModifyOptions struct { + // Operation values are "update", "remove" and "annotate". This allows the service to + // efficiently perform each update on a manifest list. 
+ Operation *string + All *bool // All when true, operate on all images in a manifest list that may be included in Images + + Annotations map[string]string // Annotations to add to the entries for Images in the manifest list + Arch *string // Arch overrides the architecture for the image + Features []string // Feature list for the image + OS *string // OS overrides the operating system for the image + OSFeatures []string `json:"os_features" schema:"os_features"` // OSFeatures overrides the OS features for the image + OSVersion *string `json:"os_version" schema:"os_version"` // OSVersion overrides the operating system version for the image + Variant *string // Variant overrides the architecture variant for the image + + Images []string // Images is an optional list of images to add/remove to/from manifest list depending on operation + Authfile *string + Password *string + Username *string + SkipTLSVerify *bool `schema:"-"` + + ArtifactType **string `json:"artifact_type"` // the ArtifactType in an artifact manifest being created + ArtifactConfigType *string `json:"artifact_config_type"` // the config.MediaType in an artifact manifest being created + ArtifactConfig *string `json:"artifact_config"` // the config.Data in an artifact manifest being created + ArtifactLayerType *string `json:"artifact_layer_type"` // the MediaType for each layer in an artifact manifest being created + ArtifactExcludeTitles *bool `json:"artifact_exclude_titles"` // whether or not to include title annotations for each layer in an artifact manifest being created + ArtifactSubject *string `json:"artifact_subject"` // subject to set in an artifact manifest being created + ArtifactAnnotations map[string]string `json:"artifact_annotations"` // annotations to add to an artifact manifest being created + ArtifactFiles *[]string `json:"artifact_files"` // an optional list of files to add to a new artifact manifest in the manifest list + Body *io.Reader `json:"-" schema:"-"` +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_add_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_add_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_add_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_add_options.go index 5ba1cc5fa..47892d793 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_add_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_add_options.go @@ -4,7 +4,7 @@ package manifests import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set @@ -77,21 +77,6 @@ func (o *AddOptions) GetFeatures() []string { return o.Features } -// WithImages set field Images to given value -func (o *AddOptions) WithImages(value []string) *AddOptions { - o.Images = value - return o -} - -// GetImages returns value of field Images -func (o *AddOptions) GetImages() []string { - if o.Images == nil { - var z []string - return z - } - return o.Images -} - // WithOS set field OS to given value func (o *AddOptions) WithOS(value string) *AddOptions { o.OS = &value @@ -122,6 +107,21 @@ func (o *AddOptions) GetOSVersion() string { return *o.OSVersion } +// WithOSFeatures set field OSFeatures to given value +func (o *AddOptions) WithOSFeatures(value []string) *AddOptions { + o.OSFeatures = value + 
return o +} + +// GetOSFeatures returns value of field OSFeatures +func (o *AddOptions) GetOSFeatures() []string { + if o.OSFeatures == nil { + var z []string + return z + } + return o.OSFeatures +} + // WithVariant set field Variant to given value func (o *AddOptions) WithVariant(value string) *AddOptions { o.Variant = &value @@ -137,6 +137,21 @@ func (o *AddOptions) GetVariant() string { return *o.Variant } +// WithImages set field Images to given value +func (o *AddOptions) WithImages(value []string) *AddOptions { + o.Images = value + return o +} + +// GetImages returns value of field Images +func (o *AddOptions) GetImages() []string { + if o.Images == nil { + var z []string + return z + } + return o.Images +} + // WithAuthfile set field Authfile to given value func (o *AddOptions) WithAuthfile(value string) *AddOptions { o.Authfile = &value diff --git a/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_addartifact_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_addartifact_options.go new file mode 100644 index 000000000..3b1c2804e --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_addartifact_options.go @@ -0,0 +1,243 @@ +// Code generated by go generate; DO NOT EDIT. +package manifests + +import ( + "net/url" + + "github.com/containers/podman/v5/pkg/bindings/internal/util" +) + +// Changed returns true if named field has been set +func (o *AddArtifactOptions) Changed(fieldName string) bool { + return util.Changed(o, fieldName) +} + +// ToParams formats struct fields to be passed to API service +func (o *AddArtifactOptions) ToParams() (url.Values, error) { + return util.ToParams(o) +} + +// WithAnnotation set field Annotation to given value +func (o *AddArtifactOptions) WithAnnotation(value map[string]string) *AddArtifactOptions { + o.Annotation = value + return o +} + +// GetAnnotation returns value of field Annotation +func (o *AddArtifactOptions) GetAnnotation() map[string]string { + if o.Annotation == nil { + var z map[string]string + return z + } + return o.Annotation +} + +// WithArch set field Arch to given value +func (o *AddArtifactOptions) WithArch(value string) *AddArtifactOptions { + o.Arch = &value + return o +} + +// GetArch returns value of field Arch +func (o *AddArtifactOptions) GetArch() string { + if o.Arch == nil { + var z string + return z + } + return *o.Arch +} + +// WithFeatures set field Features to given value +func (o *AddArtifactOptions) WithFeatures(value []string) *AddArtifactOptions { + o.Features = value + return o +} + +// GetFeatures returns value of field Features +func (o *AddArtifactOptions) GetFeatures() []string { + if o.Features == nil { + var z []string + return z + } + return o.Features +} + +// WithOS set field OS to given value +func (o *AddArtifactOptions) WithOS(value string) *AddArtifactOptions { + o.OS = &value + return o +} + +// GetOS returns value of field OS +func (o *AddArtifactOptions) GetOS() string { + if o.OS == nil { + var z string + return z + } + return *o.OS +} + +// WithOSVersion set field OSVersion to given value +func (o *AddArtifactOptions) WithOSVersion(value string) *AddArtifactOptions { + o.OSVersion = &value + return o +} + +// GetOSVersion returns value of field OSVersion +func (o *AddArtifactOptions) GetOSVersion() string { + if o.OSVersion == nil { + var z string + return z + } + return *o.OSVersion +} + +// WithOSFeatures set field OSFeatures to given value +func (o *AddArtifactOptions) WithOSFeatures(value []string) 
*AddArtifactOptions { + o.OSFeatures = value + return o +} + +// GetOSFeatures returns value of field OSFeatures +func (o *AddArtifactOptions) GetOSFeatures() []string { + if o.OSFeatures == nil { + var z []string + return z + } + return o.OSFeatures +} + +// WithVariant set field Variant to given value +func (o *AddArtifactOptions) WithVariant(value string) *AddArtifactOptions { + o.Variant = &value + return o +} + +// GetVariant returns value of field Variant +func (o *AddArtifactOptions) GetVariant() string { + if o.Variant == nil { + var z string + return z + } + return *o.Variant +} + +// WithType set field Type to given value +func (o *AddArtifactOptions) WithType(value *string) *AddArtifactOptions { + o.Type = &value + return o +} + +// GetType returns value of field Type +func (o *AddArtifactOptions) GetType() *string { + if o.Type == nil { + var z *string + return z + } + return *o.Type +} + +// WithConfigType set field ConfigType to given value +func (o *AddArtifactOptions) WithConfigType(value string) *AddArtifactOptions { + o.ConfigType = &value + return o +} + +// GetConfigType returns value of field ConfigType +func (o *AddArtifactOptions) GetConfigType() string { + if o.ConfigType == nil { + var z string + return z + } + return *o.ConfigType +} + +// WithConfig set field Config to given value +func (o *AddArtifactOptions) WithConfig(value string) *AddArtifactOptions { + o.Config = &value + return o +} + +// GetConfig returns value of field Config +func (o *AddArtifactOptions) GetConfig() string { + if o.Config == nil { + var z string + return z + } + return *o.Config +} + +// WithLayerType set field LayerType to given value +func (o *AddArtifactOptions) WithLayerType(value string) *AddArtifactOptions { + o.LayerType = &value + return o +} + +// GetLayerType returns value of field LayerType +func (o *AddArtifactOptions) GetLayerType() string { + if o.LayerType == nil { + var z string + return z + } + return *o.LayerType +} + +// WithExcludeTitles set field ExcludeTitles to given value +func (o *AddArtifactOptions) WithExcludeTitles(value bool) *AddArtifactOptions { + o.ExcludeTitles = &value + return o +} + +// GetExcludeTitles returns value of field ExcludeTitles +func (o *AddArtifactOptions) GetExcludeTitles() bool { + if o.ExcludeTitles == nil { + var z bool + return z + } + return *o.ExcludeTitles +} + +// WithSubject set field Subject to given value +func (o *AddArtifactOptions) WithSubject(value string) *AddArtifactOptions { + o.Subject = &value + return o +} + +// GetSubject returns value of field Subject +func (o *AddArtifactOptions) GetSubject() string { + if o.Subject == nil { + var z string + return z + } + return *o.Subject +} + +// WithAnnotations set field Annotations to given value +func (o *AddArtifactOptions) WithAnnotations(value map[string]string) *AddArtifactOptions { + o.Annotations = value + return o +} + +// GetAnnotations returns value of field Annotations +func (o *AddArtifactOptions) GetAnnotations() map[string]string { + if o.Annotations == nil { + var z map[string]string + return z + } + return o.Annotations +} + +// WithFiles set field Files to given value +func (o *AddArtifactOptions) WithFiles(value []string) *AddArtifactOptions { + o.Files = value + return o +} + +// GetFiles returns value of field Files +func (o *AddArtifactOptions) GetFiles() []string { + if o.Files == nil { + var z []string + return z + } + return o.Files +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_create_options.go 
b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_create_options.go similarity index 68% rename from vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_create_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_create_options.go index 09942c00a..758010716 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_create_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_create_options.go @@ -4,7 +4,7 @@ package manifests import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set @@ -46,3 +46,18 @@ func (o *CreateOptions) GetAmend() bool { } return *o.Amend } + +// WithAnnotation set field Annotation to given value +func (o *CreateOptions) WithAnnotation(value map[string]string) *CreateOptions { + o.Annotation = value + return o +} + +// GetAnnotation returns value of field Annotation +func (o *CreateOptions) GetAnnotation() map[string]string { + if o.Annotation == nil { + var z map[string]string + return z + } + return o.Annotation +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_exists_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_exists_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_exists_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_exists_options.go index 287710ce9..82ace8c3c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_exists_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_exists_options.go @@ -4,7 +4,7 @@ package manifests import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_inspect_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_inspect_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_inspect_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_inspect_options.go index 791dbf3e6..062851374 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_inspect_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_inspect_options.go @@ -4,7 +4,7 @@ package manifests import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_modify_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_modify_options.go similarity index 50% rename from vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_modify_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_modify_options.go index ab00cb2c5..1957b8027 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_modify_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_modify_options.go @@ 
-2,9 +2,10 @@ package manifests import ( + "io" "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set @@ -47,13 +48,13 @@ func (o *ModifyOptions) GetAll() bool { return *o.All } -// WithAnnotations set annotations to add to manifest list +// WithAnnotations set annotations to add to the entries for Images in the manifest list func (o *ModifyOptions) WithAnnotations(value map[string]string) *ModifyOptions { o.Annotations = value return o } -// GetAnnotations returns value of annotations to add to manifest list +// GetAnnotations returns value of annotations to add to the entries for Images in the manifest list func (o *ModifyOptions) GetAnnotations() map[string]string { if o.Annotations == nil { var z map[string]string @@ -92,21 +93,6 @@ func (o *ModifyOptions) GetFeatures() []string { return o.Features } -// WithImages set images is an optional list of images to add/remove to/from manifest list depending on operation -func (o *ModifyOptions) WithImages(value []string) *ModifyOptions { - o.Images = value - return o -} - -// GetImages returns value of images is an optional list of images to add/remove to/from manifest list depending on operation -func (o *ModifyOptions) GetImages() []string { - if o.Images == nil { - var z []string - return z - } - return o.Images -} - // WithOS set oS overrides the operating system for the image func (o *ModifyOptions) WithOS(value string) *ModifyOptions { o.OS = &value @@ -122,13 +108,13 @@ func (o *ModifyOptions) GetOS() string { return *o.OS } -// WithOSFeatures set field OSFeatures to given value +// WithOSFeatures set oSFeatures overrides the OS features for the image func (o *ModifyOptions) WithOSFeatures(value []string) *ModifyOptions { o.OSFeatures = value return o } -// GetOSFeatures returns value of field OSFeatures +// GetOSFeatures returns value of oSFeatures overrides the OS features for the image func (o *ModifyOptions) GetOSFeatures() []string { if o.OSFeatures == nil { var z []string @@ -137,13 +123,13 @@ func (o *ModifyOptions) GetOSFeatures() []string { return o.OSFeatures } -// WithOSVersion set field OSVersion to given value +// WithOSVersion set oSVersion overrides the operating system version for the image func (o *ModifyOptions) WithOSVersion(value string) *ModifyOptions { o.OSVersion = &value return o } -// GetOSVersion returns value of field OSVersion +// GetOSVersion returns value of oSVersion overrides the operating system version for the image func (o *ModifyOptions) GetOSVersion() string { if o.OSVersion == nil { var z string @@ -152,13 +138,13 @@ func (o *ModifyOptions) GetOSVersion() string { return *o.OSVersion } -// WithVariant set variant overrides the operating system variant for the image +// WithVariant set variant overrides the architecture variant for the image func (o *ModifyOptions) WithVariant(value string) *ModifyOptions { o.Variant = &value return o } -// GetVariant returns value of variant overrides the operating system variant for the image +// GetVariant returns value of variant overrides the architecture variant for the image func (o *ModifyOptions) GetVariant() string { if o.Variant == nil { var z string @@ -167,6 +153,21 @@ func (o *ModifyOptions) GetVariant() string { return *o.Variant } +// WithImages set images is an optional list of images to add/remove to/from manifest list depending on operation +func (o *ModifyOptions) WithImages(value []string) *ModifyOptions { + 
o.Images = value + return o +} + +// GetImages returns value of images is an optional list of images to add/remove to/from manifest list depending on operation +func (o *ModifyOptions) GetImages() []string { + if o.Images == nil { + var z []string + return z + } + return o.Images +} + // WithAuthfile set field Authfile to given value func (o *ModifyOptions) WithAuthfile(value string) *ModifyOptions { o.Authfile = &value @@ -226,3 +227,138 @@ func (o *ModifyOptions) GetSkipTLSVerify() bool { } return *o.SkipTLSVerify } + +// WithArtifactType set the ArtifactType in an artifact manifest being created +func (o *ModifyOptions) WithArtifactType(value *string) *ModifyOptions { + o.ArtifactType = &value + return o +} + +// GetArtifactType returns value of the ArtifactType in an artifact manifest being created +func (o *ModifyOptions) GetArtifactType() *string { + if o.ArtifactType == nil { + var z *string + return z + } + return *o.ArtifactType +} + +// WithArtifactConfigType set the config.MediaType in an artifact manifest being created +func (o *ModifyOptions) WithArtifactConfigType(value string) *ModifyOptions { + o.ArtifactConfigType = &value + return o +} + +// GetArtifactConfigType returns value of the config.MediaType in an artifact manifest being created +func (o *ModifyOptions) GetArtifactConfigType() string { + if o.ArtifactConfigType == nil { + var z string + return z + } + return *o.ArtifactConfigType +} + +// WithArtifactConfig set the config.Data in an artifact manifest being created +func (o *ModifyOptions) WithArtifactConfig(value string) *ModifyOptions { + o.ArtifactConfig = &value + return o +} + +// GetArtifactConfig returns value of the config.Data in an artifact manifest being created +func (o *ModifyOptions) GetArtifactConfig() string { + if o.ArtifactConfig == nil { + var z string + return z + } + return *o.ArtifactConfig +} + +// WithArtifactLayerType set the MediaType for each layer in an artifact manifest being created +func (o *ModifyOptions) WithArtifactLayerType(value string) *ModifyOptions { + o.ArtifactLayerType = &value + return o +} + +// GetArtifactLayerType returns value of the MediaType for each layer in an artifact manifest being created +func (o *ModifyOptions) GetArtifactLayerType() string { + if o.ArtifactLayerType == nil { + var z string + return z + } + return *o.ArtifactLayerType +} + +// WithArtifactExcludeTitles set whether or not to include title annotations for each layer in an artifact manifest being created +func (o *ModifyOptions) WithArtifactExcludeTitles(value bool) *ModifyOptions { + o.ArtifactExcludeTitles = &value + return o +} + +// GetArtifactExcludeTitles returns value of whether or not to include title annotations for each layer in an artifact manifest being created +func (o *ModifyOptions) GetArtifactExcludeTitles() bool { + if o.ArtifactExcludeTitles == nil { + var z bool + return z + } + return *o.ArtifactExcludeTitles +} + +// WithArtifactSubject set subject to set in an artifact manifest being created +func (o *ModifyOptions) WithArtifactSubject(value string) *ModifyOptions { + o.ArtifactSubject = &value + return o +} + +// GetArtifactSubject returns value of subject to set in an artifact manifest being created +func (o *ModifyOptions) GetArtifactSubject() string { + if o.ArtifactSubject == nil { + var z string + return z + } + return *o.ArtifactSubject +} + +// WithArtifactAnnotations set annotations to add to an artifact manifest being created +func (o *ModifyOptions) WithArtifactAnnotations(value map[string]string) *ModifyOptions 
{ + o.ArtifactAnnotations = value + return o +} + +// GetArtifactAnnotations returns value of annotations to add to an artifact manifest being created +func (o *ModifyOptions) GetArtifactAnnotations() map[string]string { + if o.ArtifactAnnotations == nil { + var z map[string]string + return z + } + return o.ArtifactAnnotations +} + +// WithArtifactFiles set an optional list of files to add to a new artifact manifest in the manifest list +func (o *ModifyOptions) WithArtifactFiles(value []string) *ModifyOptions { + o.ArtifactFiles = &value + return o +} + +// GetArtifactFiles returns value of an optional list of files to add to a new artifact manifest in the manifest list +func (o *ModifyOptions) GetArtifactFiles() []string { + if o.ArtifactFiles == nil { + var z []string + return z + } + return *o.ArtifactFiles +} + +// WithBody set field Body to given value +func (o *ModifyOptions) WithBody(value io.Reader) *ModifyOptions { + o.Body = &value + return o +} + +// GetBody returns value of field Body +func (o *ModifyOptions) GetBody() io.Reader { + if o.Body == nil { + var z io.Reader + return z + } + return *o.Body +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_remove_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_remove_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_remove_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_remove_options.go index efadbe2c0..065e9f96b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/types_remove_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/manifests/types_remove_options.go @@ -4,7 +4,7 @@ package manifests import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/network/network.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/network.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/network.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/network.go index 79d26cbfa..a0512d5ec 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/network/network.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/network/network.go @@ -7,8 +7,8 @@ import ( "strings" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/bindings" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" jsoniter "github.com/json-iterator/go" ) @@ -70,8 +70,8 @@ func Update(ctx context.Context, netNameOrID string, options *UpdateOptions) err } // Inspect returns information about a network configuration -func Inspect(ctx context.Context, nameOrID string, _ *InspectOptions) (types.Network, error) { - var net types.Network +func Inspect(ctx context.Context, nameOrID string, _ *InspectOptions) (entitiesTypes.NetworkInspectReport, error) { + var net entitiesTypes.NetworkInspectReport conn, err := bindings.GetClient(ctx) if err != nil { return net, err @@ -88,8 +88,8 @@ func Inspect(ctx context.Context, nameOrID string, _ *InspectOptions) (types.Net // Remove deletes a defined network configuration by name. 
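
A hedged sketch of the changed Inspect binding: under v5 it returns an entities/types `NetworkInspectReport` rather than libnetwork's `types.Network`. The socket URI and network name here are placeholders, and the `Name`/`Driver` fields are assumed to resolve through the report's embedded network definition as in earlier releases:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v5/pkg/bindings"
	"github.com/containers/podman/v5/pkg/bindings/network"
)

func main() {
	// Placeholder rootful socket; a rootless setup would typically use
	// unix://$XDG_RUNTIME_DIR/podman/podman.sock instead.
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	report, err := network.Inspect(ctx, "podman", nil) // nil *InspectOptions, per the signature above
	if err != nil {
		panic(err)
	}
	fmt.Println(report.Name, report.Driver)
}
```
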
The optional force boolean // will remove all containers associated with the network when set to true. A slice // of NetworkRemoveReports are returned. -func Remove(ctx context.Context, nameOrID string, options *RemoveOptions) ([]*entities.NetworkRmReport, error) { - var reports []*entities.NetworkRmReport +func Remove(ctx context.Context, nameOrID string, options *RemoveOptions) ([]*entitiesTypes.NetworkRmReport, error) { + var reports []*entitiesTypes.NetworkRmReport if options == nil { options = new(RemoveOptions) } @@ -177,7 +177,7 @@ func Connect(ctx context.Context, networkName string, containerNameOrID string, return err } // Connect sends everything in body - connect := entities.NetworkConnectOptions{ + connect := entitiesTypes.NetworkConnectOptions{ Container: containerNameOrID, PerNetworkOptions: *options, } @@ -212,7 +212,7 @@ func Exists(ctx context.Context, nameOrID string, options *ExistsOptions) (bool, } // Prune removes unused networks -func Prune(ctx context.Context, options *PruneOptions) ([]*entities.NetworkPruneReport, error) { +func Prune(ctx context.Context, options *PruneOptions) ([]*entitiesTypes.NetworkPruneReport, error) { if options == nil { options = new(PruneOptions) } @@ -221,7 +221,7 @@ func Prune(ctx context.Context, options *PruneOptions) ([]*entities.NetworkPrune return nil, err } var ( - prunedNetworks []*entities.NetworkPruneReport + prunedNetworks []*entitiesTypes.NetworkPruneReport ) conn, err := bindings.GetClient(ctx) if err != nil { diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/types.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/types.go diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_create_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_create_options.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/types_create_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/types_create_options.go index 63551c14a..2bfa13a11 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_create_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_create_options.go @@ -5,7 +5,7 @@ import ( "net" "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_disconnect_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_disconnect_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/types_disconnect_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/types_disconnect_options.go index 0c3560d17..987138f50 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_disconnect_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_disconnect_options.go @@ -4,7 +4,7 @@ package network import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git 
a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_exists_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_exists_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/types_exists_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/types_exists_options.go index 890f85db0..72c936a6d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_exists_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_exists_options.go @@ -4,7 +4,7 @@ package network import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_extracreate_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_extracreate_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/types_extracreate_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/types_extracreate_options.go index f12adf6bc..6f74842cd 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_extracreate_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_extracreate_options.go @@ -4,7 +4,7 @@ package network import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_inspect_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_inspect_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/types_inspect_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/types_inspect_options.go index 1d5fcd4c8..d9cc782bc 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_inspect_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_inspect_options.go @@ -4,7 +4,7 @@ package network import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_list_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_list_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/types_list_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/types_list_options.go index 95bf088a8..b1f214cdd 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_list_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_list_options.go @@ -4,7 +4,7 @@ package network import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_prune_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_prune_options.go 
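
The network options files above and below are pure import-path renames; only the report types moved. A sketch of the prune call whose return type changed, assuming `NetworkPruneReport` still carries the v4-era `Name` and `Error` fields:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v5/pkg/bindings"
	"github.com/containers/podman/v5/pkg/bindings/network"
)

func main() {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	reports, err := network.Prune(ctx, nil) // nil *PruneOptions prunes with no filters
	if err != nil {
		panic(err)
	}
	for _, r := range reports {
		if r.Error != nil {
			fmt.Println("failed:", r.Name, r.Error)
			continue
		}
		fmt.Println("removed:", r.Name)
	}
}
```
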
similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/types_prune_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/types_prune_options.go index 4d0fdbf43..6036648e9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_prune_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_prune_options.go @@ -4,7 +4,7 @@ package network import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_remove_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_remove_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/types_remove_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/types_remove_options.go index 6c73a3989..426fdb1c4 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_remove_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_remove_options.go @@ -4,7 +4,7 @@ package network import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_update_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_update_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/network/types_update_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/network/types_update_options.go index 2cc9d50dc..424d06713 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/network/types_update_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/network/types_update_options.go @@ -4,7 +4,7 @@ package network import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/play/play.go b/vendor/github.com/containers/podman/v5/pkg/bindings/play/play.go similarity index 64% rename from vendor/github.com/containers/podman/v4/pkg/bindings/play/play.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/play/play.go index 803349b88..acc39d972 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/play/play.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/play/play.go @@ -4,24 +4,24 @@ import ( "context" "io" - "github.com/containers/podman/v4/pkg/bindings/kube" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/bindings/kube" + "github.com/containers/podman/v5/pkg/domain/entities/types" ) type KubeOptions = kube.PlayOptions -func Kube(ctx context.Context, path string, options *KubeOptions) (*entities.PlayKubeReport, error) { +func Kube(ctx context.Context, path string, options *KubeOptions) (*types.PlayKubeReport, error) { return kube.Play(ctx, path, options) } -func KubeWithBody(ctx context.Context, body io.Reader, options *KubeOptions) (*entities.PlayKubeReport, error) { +func KubeWithBody(ctx context.Context, body io.Reader, options 
*KubeOptions) (*types.PlayKubeReport, error) { return kube.PlayWithBody(ctx, body, options) } -func Down(ctx context.Context, path string, options kube.DownOptions) (*entities.PlayKubeReport, error) { +func Down(ctx context.Context, path string, options kube.DownOptions) (*types.PlayKubeReport, error) { return kube.Down(ctx, path, options) } -func DownWithBody(ctx context.Context, body io.Reader, options kube.DownOptions) (*entities.PlayKubeReport, error) { +func DownWithBody(ctx context.Context, body io.Reader, options kube.DownOptions) (*types.PlayKubeReport, error) { return kube.DownWithBody(ctx, body, options) } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/pods.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/pods.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/pods.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/pods.go index 47befb093..88ceeca5e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/pods.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/pods.go @@ -6,19 +6,19 @@ import ( "net/url" "strings" - "github.com/containers/podman/v4/pkg/api/handlers" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/api/handlers" + "github.com/containers/podman/v5/pkg/bindings" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/errorhandling" jsoniter "github.com/json-iterator/go" ) -func CreatePodFromSpec(ctx context.Context, spec *entities.PodSpec) (*entities.PodCreateReport, error) { +func CreatePodFromSpec(ctx context.Context, spec *entitiesTypes.PodSpec) (*entitiesTypes.PodCreateReport, error) { var ( - pcr entities.PodCreateReport + pcr entitiesTypes.PodCreateReport ) if spec == nil { - spec = new(entities.PodSpec) + spec = new(entitiesTypes.PodSpec) } conn, err := bindings.GetClient(ctx) if err != nil { @@ -54,9 +54,9 @@ func Exists(ctx context.Context, nameOrID string, options *ExistsOptions) (bool, } // Inspect returns low-level information about the given pod. -func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entities.PodInspectReport, error) { +func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entitiesTypes.PodInspectReport, error) { var ( - report entities.PodInspectReport + report entitiesTypes.PodInspectReport ) if options == nil { options = new(InspectOptions) @@ -77,9 +77,9 @@ func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*en // Kill sends a SIGTERM to all the containers in a pod. The optional signal parameter // can be used to override SIGTERM. -func Kill(ctx context.Context, nameOrID string, options *KillOptions) (*entities.PodKillReport, error) { +func Kill(ctx context.Context, nameOrID string, options *KillOptions) (*entitiesTypes.PodKillReport, error) { var ( - report entities.PodKillReport + report entitiesTypes.PodKillReport ) if options == nil { options = new(KillOptions) @@ -102,8 +102,8 @@ func Kill(ctx context.Context, nameOrID string, options *KillOptions) (*entities } // Pause pauses all running containers in a given pod. 
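
The play package remains a thin alias over the kube bindings, now returning `*types.PlayKubeReport`. A minimal sketch of playing a manifest under v5; the YAML path is a placeholder and the report is assumed to expose a `Pods` slice with `ID` and `Containers` as in v4:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v5/pkg/bindings"
	"github.com/containers/podman/v5/pkg/bindings/play"
)

func main() {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	report, err := play.Kube(ctx, "./deployment.yaml", nil) // placeholder manifest path
	if err != nil {
		panic(err)
	}
	for _, pod := range report.Pods {
		fmt.Println(pod.ID, pod.Containers)
	}
}
```
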
-func Pause(ctx context.Context, nameOrID string, options *PauseOptions) (*entities.PodPauseReport, error) { - var report entities.PodPauseReport +func Pause(ctx context.Context, nameOrID string, options *PauseOptions) (*entitiesTypes.PodPauseReport, error) { + var report entitiesTypes.PodPauseReport if options == nil { options = new(PauseOptions) } @@ -123,8 +123,8 @@ func Pause(ctx context.Context, nameOrID string, options *PauseOptions) (*entiti // Prune by default removes all non-running pods in local storage. // And with force set true removes all pods. -func Prune(ctx context.Context, options *PruneOptions) ([]*entities.PodPruneReport, error) { - var reports []*entities.PodPruneReport +func Prune(ctx context.Context, options *PruneOptions) ([]*entitiesTypes.PodPruneReport, error) { + var reports []*entitiesTypes.PodPruneReport if options == nil { options = new(PruneOptions) } @@ -144,9 +144,9 @@ func Prune(ctx context.Context, options *PruneOptions) ([]*entities.PodPruneRepo // List returns all pods in local storage. The optional filters parameter can // be used to refine which pods should be listed. -func List(ctx context.Context, options *ListOptions) ([]*entities.ListPodsReport, error) { +func List(ctx context.Context, options *ListOptions) ([]*entitiesTypes.ListPodsReport, error) { var ( - podsReports []*entities.ListPodsReport + podsReports []*entitiesTypes.ListPodsReport ) if options == nil { options = new(ListOptions) @@ -169,8 +169,8 @@ func List(ctx context.Context, options *ListOptions) ([]*entities.ListPodsReport } // Restart restarts all containers in a pod. -func Restart(ctx context.Context, nameOrID string, options *RestartOptions) (*entities.PodRestartReport, error) { - var report entities.PodRestartReport +func Restart(ctx context.Context, nameOrID string, options *RestartOptions) (*entitiesTypes.PodRestartReport, error) { + var report entitiesTypes.PodRestartReport if options == nil { options = new(RestartOptions) } @@ -190,8 +190,8 @@ func Restart(ctx context.Context, nameOrID string, options *RestartOptions) (*en // Remove deletes a Pod from local storage. The optional force parameter denotes // that the Pod can be removed even if in a running state. -func Remove(ctx context.Context, nameOrID string, options *RemoveOptions) (*entities.PodRmReport, error) { - var report entities.PodRmReport +func Remove(ctx context.Context, nameOrID string, options *RemoveOptions) (*entitiesTypes.PodRmReport, error) { + var report entitiesTypes.PodRmReport if options == nil { options = new(RemoveOptions) } @@ -213,8 +213,8 @@ func Remove(ctx context.Context, nameOrID string, options *RemoveOptions) (*enti } // Start starts all containers in a pod. -func Start(ctx context.Context, nameOrID string, options *StartOptions) (*entities.PodStartReport, error) { - var report entities.PodStartReport +func Start(ctx context.Context, nameOrID string, options *StartOptions) (*entitiesTypes.PodStartReport, error) { + var report entitiesTypes.PodStartReport if options == nil { options = new(StartOptions) } @@ -239,8 +239,8 @@ func Start(ctx context.Context, nameOrID string, options *StartOptions) (*entiti // Stop stops all containers in a Pod. The optional timeout parameter can be // used to override the timeout before the container is killed. 
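
The pod bindings change only in where their report types live. A sketch of listing running pods with the filter-bearing options, using the `ListPodsReport` type referenced above; the filter key mirrors `podman pod ps --filter` and the socket path is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v5/pkg/bindings"
	"github.com/containers/podman/v5/pkg/bindings/pods"
)

func main() {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	// Filters use the same keys as the CLI's pod listing filters.
	opts := new(pods.ListOptions).WithFilters(map[string][]string{"status": {"running"}})
	reports, err := pods.List(ctx, opts)
	if err != nil {
		panic(err)
	}
	for _, p := range reports {
		fmt.Println(p.Name, p.Status)
	}
}
```
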
-func Stop(ctx context.Context, nameOrID string, options *StopOptions) (*entities.PodStopReport, error) { - var report entities.PodStopReport +func Stop(ctx context.Context, nameOrID string, options *StopOptions) (*entitiesTypes.PodStopReport, error) { + var report entitiesTypes.PodStopReport if options == nil { options = new(StopOptions) } @@ -302,12 +302,12 @@ func Top(ctx context.Context, nameOrID string, options *TopOptions) ([]string, e } // Unpause unpauses all paused containers in a Pod. -func Unpause(ctx context.Context, nameOrID string, options *UnpauseOptions) (*entities.PodUnpauseReport, error) { +func Unpause(ctx context.Context, nameOrID string, options *UnpauseOptions) (*entitiesTypes.PodUnpauseReport, error) { if options == nil { options = new(UnpauseOptions) } _ = options - var report entities.PodUnpauseReport + var report entitiesTypes.PodUnpauseReport conn, err := bindings.GetClient(ctx) if err != nil { return nil, err @@ -322,7 +322,7 @@ func Unpause(ctx context.Context, nameOrID string, options *UnpauseOptions) (*en } // Stats display resource-usage statistics of one or more pods. -func Stats(ctx context.Context, namesOrIDs []string, options *StatsOptions) ([]*entities.PodStatsReport, error) { +func Stats(ctx context.Context, namesOrIDs []string, options *StatsOptions) ([]*entitiesTypes.PodStatsReport, error) { if options == nil { options = new(StatsOptions) } @@ -338,7 +338,7 @@ func Stats(ctx context.Context, namesOrIDs []string, options *StatsOptions) ([]* params.Add("namesOrIDs", i) } - var reports []*entities.PodStatsReport + var reports []*entitiesTypes.PodStatsReport response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/pods/stats", params, nil) if err != nil { return nil, err diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types.go diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_create_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_create_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_create_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_create_options.go index c7b7114e2..b9ae5122f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_create_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_create_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_exists_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_exists_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_exists_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_exists_options.go index ca043986f..3dd4f0a00 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_exists_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_exists_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - 
"github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_inspect_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_inspect_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_inspect_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_inspect_options.go index cf630f36b..75c501a5b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_inspect_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_inspect_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_kill_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_kill_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_kill_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_kill_options.go index 547afe5d3..489800c6c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_kill_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_kill_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_list_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_list_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_list_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_list_options.go index af6ce44f8..038b753b6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_list_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_list_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_pause_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_pause_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_pause_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_pause_options.go index 6f38ad9b7..0bee2b0fd 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_pause_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_pause_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_prune_options.go 
b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_prune_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_prune_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_prune_options.go index 9d1f3749a..dac0417f7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_prune_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_prune_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_remove_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_remove_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_remove_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_remove_options.go index 53c30617f..a0bbd57da 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_remove_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_remove_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_restart_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_restart_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_restart_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_restart_options.go index d2beb641b..957bab982 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_restart_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_restart_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_start_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_start_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_start_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_start_options.go index 59f3000e6..ec971037b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_start_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_start_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_stats_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_stats_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_stats_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_stats_options.go 
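
These option files are likewise mechanical renames. For the `Stats` binding whose `PodStatsReport` moved packages, a hedged sketch of pulling usage figures for all pods; the `Pod`, `CPU`, and `MemUsage` field names are assumed from the v4 report layout:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/podman/v5/pkg/bindings"
	"github.com/containers/podman/v5/pkg/bindings/pods"
)

func main() {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		panic(err)
	}
	// nil namesOrIDs reports on all pods; nil *StatsOptions takes the defaults.
	reports, err := pods.Stats(ctx, nil, nil)
	if err != nil {
		panic(err)
	}
	for _, r := range reports {
		fmt.Println(r.Pod, r.CPU, r.MemUsage)
	}
}
```
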
index ad7b6ca0a..f3e662b19 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_stats_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_stats_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_stop_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_stop_options.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_stop_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_stop_options.go index 198210845..e635780a2 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_stop_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_stop_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_top_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_top_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_top_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_top_options.go index 3d165c503..c8e415ec6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_top_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_top_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_unpause_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_unpause_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_unpause_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_unpause_options.go index 0cd5c3447..01f316d01 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/pods/types_unpause_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/pods/types_unpause_options.go @@ -4,7 +4,7 @@ package pods import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/secrets.go b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/secrets.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/secrets/secrets.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/secrets/secrets.go index 2cd392a14..49313f94c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/secrets.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/secrets.go @@ -5,14 +5,14 @@ import ( "io" "net/http" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" + 
"github.com/containers/podman/v5/pkg/bindings" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" ) // List returns information about existing secrets in the form of a slice. -func List(ctx context.Context, options *ListOptions) ([]*entities.SecretInfoReport, error) { +func List(ctx context.Context, options *ListOptions) ([]*entitiesTypes.SecretInfoReport, error) { var ( - secrs []*entities.SecretInfoReport + secrs []*entitiesTypes.SecretInfoReport ) conn, err := bindings.GetClient(ctx) if err != nil { @@ -32,12 +32,12 @@ func List(ctx context.Context, options *ListOptions) ([]*entities.SecretInfoRepo } // Inspect returns low-level information about a secret. -func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entities.SecretInfoReport, error) { +func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entitiesTypes.SecretInfoReport, error) { if options == nil { options = new(InspectOptions) } var ( - inspect *entities.SecretInfoReport + inspect *entitiesTypes.SecretInfoReport ) conn, err := bindings.GetClient(ctx) if err != nil { @@ -72,9 +72,9 @@ func Remove(ctx context.Context, nameOrID string) error { } // Create creates a secret given some data -func Create(ctx context.Context, reader io.Reader, options *CreateOptions) (*entities.SecretCreateReport, error) { +func Create(ctx context.Context, reader io.Reader, options *CreateOptions) (*entitiesTypes.SecretCreateReport, error) { var ( - create *entities.SecretCreateReport + create *entitiesTypes.SecretCreateReport ) conn, err := bindings.GetClient(ctx) if err != nil { diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types.go diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_create_options.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_create_options.go index 19ae02d72..8403b1f80 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_create_options.go @@ -4,7 +4,7 @@ package secrets import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_inspect_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_inspect_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_inspect_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_inspect_options.go index 5ef8a1276..0456ff77a 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_inspect_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_inspect_options.go @@ -4,7 +4,7 @@ package secrets import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + 
"github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_list_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_list_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_list_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_list_options.go index 97351e16d..1dd120fa6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_list_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_list_options.go @@ -4,7 +4,7 @@ package secrets import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_remove_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_remove_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_remove_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_remove_options.go index f8a060fdd..5dff2f91c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_remove_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/secrets/types_remove_options.go @@ -4,7 +4,7 @@ package secrets import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/system/info.go b/vendor/github.com/containers/podman/v5/pkg/bindings/system/info.go similarity index 83% rename from vendor/github.com/containers/podman/v4/pkg/bindings/system/info.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/system/info.go index a75ffc982..bf7048136 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/system/info.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/system/info.go @@ -4,8 +4,8 @@ import ( "context" "net/http" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/bindings" ) // Info returns information about the libpod environment and its stores diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/system/system.go b/vendor/github.com/containers/podman/v5/pkg/bindings/system/system.go similarity index 80% rename from vendor/github.com/containers/podman/v4/pkg/bindings/system/system.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/system/system.go index 733b2cb5c..e97ebc7b6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/system/system.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/system/system.go @@ -9,16 +9,16 @@ import ( "net/http" "time" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/types" "github.com/sirupsen/logrus" ) // Events allows you to monitor libdpod 
related events like container creation and // removal. The events are then passed to the eventChan provided. The optional cancelChan // can be used to cancel the read of events and close down the HTTP connection. -func Events(ctx context.Context, eventChan chan entities.Event, cancelChan chan bool, options *EventsOptions) error { +func Events(ctx context.Context, eventChan chan types.Event, cancelChan chan bool, options *EventsOptions) error { conn, err := bindings.GetClient(ctx) if err != nil { return err @@ -44,7 +44,7 @@ func Events(ctx context.Context, eventChan chan entities.Event, cancelChan chan dec := json.NewDecoder(response.Body) for err = (error)(nil); err == nil; { - var e = entities.Event{} + var e = types.Event{} err = dec.Decode(&e) if err == nil { eventChan <- e @@ -62,9 +62,9 @@ func Events(ctx context.Context, eventChan chan entities.Event, cancelChan chan } // Prune removes all unused system data. -func Prune(ctx context.Context, options *PruneOptions) (*entities.SystemPruneReport, error) { +func Prune(ctx context.Context, options *PruneOptions) (*types.SystemPruneReport, error) { var ( - report entities.SystemPruneReport + report types.SystemPruneReport ) conn, err := bindings.GetClient(ctx) if err != nil { @@ -83,10 +83,10 @@ func Prune(ctx context.Context, options *PruneOptions) (*entities.SystemPruneRep return &report, response.Process(&report) } -func Version(ctx context.Context, options *VersionOptions) (*entities.SystemVersionReport, error) { +func Version(ctx context.Context, options *VersionOptions) (*types.SystemVersionReport, error) { var ( - component entities.ComponentVersion - report entities.SystemVersionReport + component types.SystemComponentVersion + report types.SystemVersionReport ) if options == nil { options = new(VersionOptions) @@ -134,8 +134,8 @@ func Version(ctx context.Context, options *VersionOptions) (*entities.SystemVers // DiskUsage returns information about image, container, and volume disk // consumption -func DiskUsage(ctx context.Context, options *DiskOptions) (*entities.SystemDfReport, error) { - var report entities.SystemDfReport +func DiskUsage(ctx context.Context, options *DiskOptions) (*types.SystemDfReport, error) { + var report types.SystemDfReport if options == nil { options = new(DiskOptions) } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/bindings/system/types.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/system/types.go diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types_disk_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types_disk_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/system/types_disk_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/system/types_disk_options.go index 7f9cd000e..0acb3c55b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types_disk_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types_disk_options.go @@ -4,7 +4,7 @@ package system import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types_events_options.go 
b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types_events_options.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/bindings/system/types_events_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/system/types_events_options.go index d7f057267..3a8920cf5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types_events_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types_events_options.go @@ -4,7 +4,7 @@ package system import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types_info_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types_info_options.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/system/types_info_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/system/types_info_options.go index eae6a74bf..dce750c4c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types_info_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types_info_options.go @@ -4,7 +4,7 @@ package system import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types_prune_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types_prune_options.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/bindings/system/types_prune_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/system/types_prune_options.go index d00498520..b9758eafb 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types_prune_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types_prune_options.go @@ -4,7 +4,7 @@ package system import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types_version_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types_version_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/system/types_version_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/system/types_version_options.go index 30e02dd95..d13f24392 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/system/types_version_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/system/types_version_options.go @@ -4,7 +4,7 @@ package system import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types.go b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types.go 
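The system-bindings hunks above change the package's public surface, not just its import path: Events now streams pkg/domain/entities/types.Event instead of pkg/domain/entities.Event, and the Prune, Version, and DiskUsage reports move to the same types package. A minimal consumer-side sketch of the v5 call shape follows (not part of this diff; the socket URI is illustrative and the signatures mirror the ones shown in the hunks above):

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/podman/v5/pkg/bindings"        // was github.com/containers/podman/v4/pkg/bindings
	"github.com/containers/podman/v5/pkg/bindings/system" // was github.com/containers/podman/v4/pkg/bindings/system
	"github.com/containers/podman/v5/pkg/domain/entities/types"
)

func main() {
	// The socket URI is an example; point it at the local podman.socket.
	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// v5: the channel element is types.Event; under v4 this was entities.Event.
	eventChan := make(chan types.Event)
	cancelChan := make(chan bool) // optional; writing to it tears down the HTTP stream

	go func() {
		for e := range eventChan {
			fmt.Printf("%s %s %s\n", e.Type, e.Action, e.Actor.ID)
		}
	}()

	// Events blocks for as long as the event stream stays open.
	if err := system.Events(conn, eventChan, cancelChan, nil); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

For most v4 callers the migration is the module-path bump plus renaming the channel element type; the rest of the bindings API keeps its shape.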
diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_create_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_create_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_create_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_create_options.go index 49c59f48a..98ac9d746 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_create_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_create_options.go @@ -4,7 +4,7 @@ package volumes import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_exists_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_exists_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_exists_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_exists_options.go index 3493c23ea..70f5f5cb0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_exists_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_exists_options.go @@ -4,7 +4,7 @@ package volumes import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_inspect_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_inspect_options.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_inspect_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_inspect_options.go index a4aefd994..913faa5e9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_inspect_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_inspect_options.go @@ -4,7 +4,7 @@ package volumes import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_list_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_list_options.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_list_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_list_options.go index 3d1930bf4..2db1d37ae 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_list_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_list_options.go @@ -4,7 +4,7 @@ package volumes import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_prune_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_prune_options.go similarity index 92% 
rename from vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_prune_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_prune_options.go index 3b4671c47..ad4346a89 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_prune_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_prune_options.go @@ -4,7 +4,7 @@ package volumes import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_remove_options.go b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_remove_options.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_remove_options.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_remove_options.go index 356527705..577633e2b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/types_remove_options.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/types_remove_options.go @@ -4,7 +4,7 @@ package volumes import ( "net/url" - "github.com/containers/podman/v4/pkg/bindings/internal/util" + "github.com/containers/podman/v5/pkg/bindings/internal/util" ) // Changed returns true if named field has been set diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/volumes.go b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/volumes.go similarity index 85% rename from vendor/github.com/containers/podman/v4/pkg/bindings/volumes/volumes.go rename to vendor/github.com/containers/podman/v5/pkg/bindings/volumes/volumes.go index 290eab219..6ad1b9fb8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/volumes/volumes.go +++ b/vendor/github.com/containers/podman/v5/pkg/bindings/volumes/volumes.go @@ -5,16 +5,16 @@ import ( "net/http" "strings" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" jsoniter "github.com/json-iterator/go" ) // Create creates a volume given its configuration. -func Create(ctx context.Context, config entities.VolumeCreateOptions, options *CreateOptions) (*entities.VolumeConfigResponse, error) { +func Create(ctx context.Context, config entitiesTypes.VolumeCreateOptions, options *CreateOptions) (*entitiesTypes.VolumeConfigResponse, error) { var ( - v entities.VolumeConfigResponse + v entitiesTypes.VolumeConfigResponse ) if options == nil { options = new(CreateOptions) @@ -39,9 +39,9 @@ func Create(ctx context.Context, config entities.VolumeCreateOptions, options *C } // Inspect returns low-level information about a volume. 
-func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entities.VolumeConfigResponse, error) { +func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entitiesTypes.VolumeConfigResponse, error) { var ( - inspect entities.VolumeConfigResponse + inspect entitiesTypes.VolumeConfigResponse ) if options == nil { options = new(InspectOptions) @@ -62,9 +62,9 @@ func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*en // List returns the configurations for existing volumes in the form of a slice. Optionally, filters // can be used to refine the list of volumes. -func List(ctx context.Context, options *ListOptions) ([]*entities.VolumeListReport, error) { +func List(ctx context.Context, options *ListOptions) ([]*entitiesTypes.VolumeListReport, error) { var ( - vols []*entities.VolumeListReport + vols []*entitiesTypes.VolumeListReport ) conn, err := bindings.GetClient(ctx) if err != nil { diff --git a/vendor/github.com/containers/podman/v4/pkg/checkpoint/checkpoint_restore.go b/vendor/github.com/containers/podman/v5/pkg/checkpoint/checkpoint_restore.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/checkpoint/checkpoint_restore.go rename to vendor/github.com/containers/podman/v5/pkg/checkpoint/checkpoint_restore.go index b44cbbbbc..e80e1cf1f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/checkpoint/checkpoint_restore.go +++ b/vendor/github.com/containers/podman/v5/pkg/checkpoint/checkpoint_restore.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package checkpoint @@ -12,13 +11,13 @@ import ( metadata "github.com/checkpoint-restore/checkpointctl/lib" "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod" - ann "github.com/containers/podman/v4/pkg/annotations" - "github.com/containers/podman/v4/pkg/checkpoint/crutils" - "github.com/containers/podman/v4/pkg/criu" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/specgen/generate" - "github.com/containers/podman/v4/pkg/specgenutil" + "github.com/containers/podman/v5/libpod" + ann "github.com/containers/podman/v5/pkg/annotations" + "github.com/containers/podman/v5/pkg/checkpoint/crutils" + "github.com/containers/podman/v5/pkg/criu" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/specgen/generate" + "github.com/containers/podman/v5/pkg/specgenutil" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/vendor/github.com/containers/podman/v5/pkg/checkpoint/crutils/checkpoint_restore_utils.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/checkpoint/crutils/checkpoint_restore_utils.go rename to vendor/github.com/containers/podman/v5/pkg/checkpoint/crutils/checkpoint_restore_utils.go diff --git a/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go b/vendor/github.com/containers/podman/v5/pkg/copy/fileinfo.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go rename to vendor/github.com/containers/podman/v5/pkg/copy/fileinfo.go index 7d4e67896..b0d31e5e0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/copy/fileinfo.go +++ b/vendor/github.com/containers/podman/v5/pkg/copy/fileinfo.go @@ -10,7 +10,7 @@ import ( "path/filepath" "strings" - 
"github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" ) // XDockerContainerPathStatHeader is the *key* in http headers pointing to the diff --git a/vendor/github.com/containers/podman/v4/pkg/copy/parse.go b/vendor/github.com/containers/podman/v5/pkg/copy/parse.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/copy/parse.go rename to vendor/github.com/containers/podman/v5/pkg/copy/parse.go index 50f1d211d..d58942f57 100644 --- a/vendor/github.com/containers/podman/v4/pkg/copy/parse.go +++ b/vendor/github.com/containers/podman/v5/pkg/copy/parse.go @@ -40,9 +40,9 @@ func parseUserInput(input string) (container string, path string) { return } - if spl := strings.SplitN(path, ":", 2); len(spl) == 2 { - container = spl[0] - path = spl[1] + if parsedContainer, parsedPath, ok := strings.Cut(path, ":"); ok { + container = parsedContainer + path = parsedPath } return } diff --git a/vendor/github.com/containers/podman/v4/pkg/criu/criu.go b/vendor/github.com/containers/podman/v5/pkg/criu/criu.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/criu/criu.go rename to vendor/github.com/containers/podman/v5/pkg/criu/criu.go diff --git a/vendor/github.com/containers/podman/v4/pkg/criu/criu_linux.go b/vendor/github.com/containers/podman/v5/pkg/criu/criu_linux.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/criu/criu_linux.go rename to vendor/github.com/containers/podman/v5/pkg/criu/criu_linux.go index f2b877aa9..75fd96265 100644 --- a/vendor/github.com/containers/podman/v4/pkg/criu/criu_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/criu/criu_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package criu diff --git a/vendor/github.com/containers/podman/v4/pkg/criu/criu_unsupported.go b/vendor/github.com/containers/podman/v5/pkg/criu/criu_unsupported.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/criu/criu_unsupported.go rename to vendor/github.com/containers/podman/v5/pkg/criu/criu_unsupported.go index 8def2b89c..a80a82b90 100644 --- a/vendor/github.com/containers/podman/v4/pkg/criu/criu_unsupported.go +++ b/vendor/github.com/containers/podman/v5/pkg/criu/criu_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package criu diff --git a/vendor/github.com/containers/podman/v4/pkg/ctime/ctime.go b/vendor/github.com/containers/podman/v5/pkg/ctime/ctime.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/ctime/ctime.go rename to vendor/github.com/containers/podman/v5/pkg/ctime/ctime.go diff --git a/vendor/github.com/containers/podman/v4/pkg/ctime/ctime_linux.go b/vendor/github.com/containers/podman/v5/pkg/ctime/ctime_linux.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/ctime/ctime_linux.go rename to vendor/github.com/containers/podman/v5/pkg/ctime/ctime_linux.go index bf3cd5752..226aeba77 100644 --- a/vendor/github.com/containers/podman/v4/pkg/ctime/ctime_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/ctime/ctime_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package ctime diff --git a/vendor/github.com/containers/podman/v4/pkg/ctime/ctime_unsupported.go b/vendor/github.com/containers/podman/v5/pkg/ctime/ctime_unsupported.go similarity index 87% rename from vendor/github.com/containers/podman/v4/pkg/ctime/ctime_unsupported.go rename to vendor/github.com/containers/podman/v5/pkg/ctime/ctime_unsupported.go index afee56027..2b93c5eb1 
100644 --- a/vendor/github.com/containers/podman/v4/pkg/ctime/ctime_unsupported.go +++ b/vendor/github.com/containers/podman/v5/pkg/ctime/ctime_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package ctime diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/apply.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/apply.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/apply.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/apply.go diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/auto-update.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/auto-update.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/auto-update.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/auto-update.go diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/container_ps.go similarity index 50% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/container_ps.go index cddea3987..0cd8e740f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/container_ps.go @@ -4,90 +4,15 @@ import ( "errors" "sort" "strings" - "time" - "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/pkg/ps/define" + "github.com/containers/podman/v5/pkg/domain/entities/types" ) // ListContainer describes a container suitable for listing -type ListContainer struct { - // AutoRemove - AutoRemove bool - // Container command - Command []string - // Container creation time - Created time.Time - // Human-readable container creation time. - CreatedAt string - // CIDFile specified at creation time. - CIDFile string - // If container has exited/stopped - Exited bool - // Time container exited - ExitedAt int64 - // If container has exited, the return code from the command - ExitCode int32 - // The unique identifier for the container - ID string `json:"Id"` - // Container image - Image string - // Container image ID - ImageID string - // If this container is a Pod infra container - IsInfra bool - // Labels for container - Labels map[string]string - // User volume mounts - Mounts []string - // The names assigned to the container - Names []string - // Namespaces the container belongs to. Requires the - // namespace boolean to be true - Namespaces ListContainerNamespaces - // The network names assigned to the container - Networks []string - // The process id of the container - Pid int - // If the container is part of Pod, the Pod ID. Requires the pod - // boolean to be set - Pod string - // If the container is part of Pod, the Pod name. Requires the pod - // boolean to be set - PodName string - // Port mappings - Ports []types.PortMapping - // Restarts is how many times the container was restarted by its - // restart policy. This is NOT incremented by normal container restarts - // (only by restart policy). - Restarts uint - // Size of the container rootfs. 
Requires the size boolean to be true - Size *define.ContainerSize - // Time when container started - StartedAt int64 - // State of container - State string - // Status is a human-readable approximation of a duration for json output - Status string -} +type ListContainer = types.ListContainer // ListContainerNamespaces contains the identifiers of the container's Linux namespaces -type ListContainerNamespaces struct { - // Mount namespace - MNT string `json:"Mnt,omitempty"` - // Cgroup namespace - Cgroup string `json:"Cgroup,omitempty"` - // IPC namespace - IPC string `json:"Ipc,omitempty"` - // Network namespace - NET string `json:"Net,omitempty"` - // PID namespace - PIDNS string `json:"Pidns,omitempty"` - // UTS namespace - UTS string `json:"Uts,omitempty"` - // User namespace - User string `json:"User,omitempty"` -} +type ListContainerNamespaces = types.ListContainerNamespaces type SortListContainers []ListContainer @@ -176,31 +101,3 @@ func SortPsOutput(sortBy string, psOutput SortListContainers) (SortListContainer } return psOutput, nil } - -func (l ListContainer) CGROUPNS() string { - return l.Namespaces.Cgroup -} - -func (l ListContainer) IPC() string { - return l.Namespaces.IPC -} - -func (l ListContainer) MNT() string { - return l.Namespaces.MNT -} - -func (l ListContainer) NET() string { - return l.Namespaces.NET -} - -func (l ListContainer) PIDNS() string { - return l.Namespaces.PIDNS -} - -func (l ListContainer) USERNS() string { - return l.Namespaces.User -} - -func (l ListContainer) UTS() string { - return l.Namespaces.UTS -} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/containers.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/containers.go index 44cf3fc51..24c1d78f8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/containers.go @@ -7,9 +7,10 @@ import ( "time" nettypes "github.com/containers/common/libnetwork/types" - "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/specgen" + imageTypes "github.com/containers/image/v5/types" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/specgen" "github.com/containers/storage/pkg/archive" ) @@ -43,7 +44,7 @@ type ContainerRunlabelOptions struct { SignaturePolicy string // SkipTLSVerify - skip HTTPS and certificate verifications when // contacting registries. - SkipTLSVerify types.OptionalBool + SkipTLSVerify imageTypes.OptionalBool } // ContainerRunlabelReport contains the results from executing container-runlabel. 
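The container_ps.go hunk above is representative of the pattern this bump applies across pkg/domain/entities: concrete structs are re-homed into pkg/domain/entities/types and the old entities names become Go type aliases, so code written against the v4-era names keeps compiling unchanged. A small sketch of why the alias form is source-compatible (printState is a hypothetical helper, not part of the diff; only ID and State from the re-homed ListContainer are used):

package main

import (
	"fmt"

	"github.com/containers/podman/v5/pkg/domain/entities"
	"github.com/containers/podman/v5/pkg/domain/entities/types"
)

// printState is declared against the new home of the type.
func printState(c types.ListContainer) {
	fmt.Println(c.ID, c.State)
}

func main() {
	// Callers still using the entities name keep compiling: the alias makes
	// entities.ListContainer and types.ListContainer the identical type,
	// so no conversion is needed at the call site.
	var c entities.ListContainer
	c.ID, c.State = "0123456789ab", "running"
	printState(c)
}

Note that an alias (type A = B) is a second name for the same type, unlike a definition (type A B), which would have required explicit conversions and broken existing callers.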
@@ -157,9 +158,7 @@ type ContainerInspectReport struct { *define.InspectContainerData } -type ContainerStatReport struct { - define.FileInfo -} +type ContainerStatReport = types.ContainerStatReport type CommitOptions struct { Author string @@ -212,13 +211,7 @@ type CheckpointOptions struct { FileLocks bool } -type CheckpointReport struct { - Err error `json:"-"` - Id string `json:"Id"` //nolint:revive,stylecheck - RawInput string `json:"-"` - RuntimeDuration int64 `json:"runtime_checkpoint_duration"` - CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"` -} +type CheckpointReport = types.CheckpointReport type RestoreOptions struct { All bool @@ -239,13 +232,7 @@ type RestoreOptions struct { FileLocks bool } -type RestoreReport struct { - Err error `json:"-"` - Id string `json:"Id"` //nolint:revive,stylecheck - RawInput string `json:"-"` - RuntimeDuration int64 `json:"runtime_restore_duration"` - CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"` -} +type RestoreReport = types.RestoreReport type ContainerCreateReport struct { Id string //nolint:revive,stylecheck @@ -298,6 +285,7 @@ type ExecOptions struct { Interactive bool Latest bool PreserveFDs uint + PreserveFD []uint Privileged bool Tty bool User string @@ -361,6 +349,7 @@ type ContainerRunOptions struct { InputStream *os.File OutputStream *os.File PreserveFDs uint + PreserveFD []uint Rm bool SigProxy bool Spec *specgen.SpecGenerator @@ -484,13 +473,7 @@ type ContainerStatsOptions struct { Interval int } -// ContainerStatsReport is used for streaming container stats. -type ContainerStatsReport struct { - // Error from reading stats. - Error error - // Results, set when there is no error. - Stats []define.ContainerStats -} +type ContainerStatsReport = types.ContainerStatsReport // ContainerRenameOptions describes input options for renaming a container. type ContainerRenameOptions struct { @@ -498,7 +481,7 @@ type ContainerRenameOptions struct { NewName string } -// ContainerCloneOptions contains options for cloning an existing continer +// ContainerCloneOptions contains options for cloning an existing container type ContainerCloneOptions struct { ID string Destroy bool @@ -510,7 +493,4 @@ type ContainerCloneOptions struct { } // ContainerUpdateOptions containers options for updating an existing containers cgroup configuration -type ContainerUpdateOptions struct { - NameOrID string - Specgen *specgen.SpecGenerator -} +type ContainerUpdateOptions = types.ContainerUpdateOptions diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/engine.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/engine.go index 60091319b..1a2fcefd0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/engine.go @@ -29,7 +29,7 @@ type PodmanConfig struct { ContainersConf *config.Config ContainersConfDefaultsRO *config.Config // The read-only! defaults from containers.conf. 
DBBackend string // Hidden: change the database backend - DockerConfig string // Used for Docker compatibility + DockerConfig string // Location of authentication config file CgroupUsage string // rootless code determines Usage message ConmonPath string // --conmon flag will set Engine.ConmonPath CPUProfile string // Hidden: Should CPU profile be taken diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/engine_container.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/engine_container.go index e697b6011..15cf309bf 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/engine_container.go @@ -4,14 +4,15 @@ import ( "context" "io" - "github.com/containers/common/libnetwork/types" + netTypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities/reports" - "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/specgen" ) -type ContainerCopyFunc func() error +type ContainerCopyFunc = types.ContainerCopyFunc type ContainerEngine interface { //nolint:interfacebloat AutoUpdate(ctx context.Context, options AutoUpdateOptions) ([]*AutoUpdateReport, []error) @@ -65,12 +66,12 @@ type ContainerEngine interface { //nolint:interfacebloat Locks(ctx context.Context) (*LocksReport, error) Migrate(ctx context.Context, options SystemMigrateOptions) error NetworkConnect(ctx context.Context, networkname string, options NetworkConnectOptions) error - NetworkCreate(ctx context.Context, network types.Network, createOptions *types.NetworkCreateOptions) (*types.Network, error) + NetworkCreate(ctx context.Context, network netTypes.Network, createOptions *netTypes.NetworkCreateOptions) (*netTypes.Network, error) NetworkUpdate(ctx context.Context, networkname string, options NetworkUpdateOptions) error NetworkDisconnect(ctx context.Context, networkname string, options NetworkDisconnectOptions) error NetworkExists(ctx context.Context, networkname string) (*BoolReport, error) - NetworkInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]types.Network, []error, error) - NetworkList(ctx context.Context, options NetworkListOptions) ([]types.Network, error) + NetworkInspect(ctx context.Context, namesOrIds []string, options InspectOptions) ([]NetworkInspectReport, []error, error) + NetworkList(ctx context.Context, options NetworkListOptions) ([]netTypes.Network, error) NetworkPrune(ctx context.Context, options NetworkPruneOptions) ([]*NetworkPruneReport, error) NetworkReload(ctx context.Context, names []string, options NetworkReloadOptions) ([]*NetworkReloadReport, error) NetworkRm(ctx context.Context, namesOrIds []string, options NetworkRmOptions) ([]*NetworkRmReport, error) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/engine_image.go similarity index 94% rename from 
vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/engine_image.go index fee4656fb..8179b9a67 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_image.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/engine_image.go @@ -5,7 +5,7 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/ssh" - "github.com/containers/podman/v4/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/domain/entities/reports" ) type ImageEngine interface { //nolint:interfacebloat @@ -36,6 +36,7 @@ type ImageEngine interface { //nolint:interfacebloat ManifestExists(ctx context.Context, name string) (*BoolReport, error) ManifestInspect(ctx context.Context, name string, opts ManifestInspectOptions) ([]byte, error) ManifestAdd(ctx context.Context, listName string, imageNames []string, opts ManifestAddOptions) (string, error) + ManifestAddArtifact(ctx context.Context, name string, files []string, opts ManifestAddArtifactOptions) (string, error) ManifestAnnotate(ctx context.Context, names, image string, opts ManifestAnnotateOptions) (string, error) ManifestRemoveDigest(ctx context.Context, names, image string) (string, error) ManifestRm(ctx context.Context, names []string) (*ImageRemoveReport, []error) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/events.go similarity index 56% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/events.go index 34a6fe048..d9c762553 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/events.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/events.go @@ -4,49 +4,54 @@ import ( "strconv" "time" - libpodEvents "github.com/containers/podman/v4/libpod/events" + libpodEvents "github.com/containers/podman/v5/libpod/events" + types "github.com/containers/podman/v5/pkg/domain/entities/types" dockerEvents "github.com/docker/docker/api/types/events" ) -// Event combines various event-related data such as time, event type, status -// and more. -type Event struct { - // TODO: it would be nice to have full control over the types at some - // point and fork such Docker types. - dockerEvents.Message - HealthStatus string `json:",omitempty"` -} +type Event = types.Event // ConvertToLibpodEvent converts an entities event to a libpod one. 
func ConvertToLibpodEvent(e Event) *libpodEvents.Event { - exitCode, err := strconv.Atoi(e.Actor.Attributes["containerExitCode"]) - if err != nil { - return nil + var exitCode int + if ec, ok := e.Actor.Attributes["containerExitCode"]; ok { + var err error + exitCode, err = strconv.Atoi(ec) + if err != nil { + return nil + } } - status, err := libpodEvents.StringToStatus(e.Action) + status, err := libpodEvents.StringToStatus(string(e.Action)) if err != nil { return nil } - t, err := libpodEvents.StringToType(e.Type) + t, err := libpodEvents.StringToType(string(e.Type)) if err != nil { return nil } image := e.Actor.Attributes["image"] name := e.Actor.Attributes["name"] - details := e.Actor.Attributes + network := e.Actor.Attributes["network"] podID := e.Actor.Attributes["podId"] + errorString := e.Actor.Attributes["error"] + details := e.Actor.Attributes delete(details, "image") delete(details, "name") + delete(details, "network") + delete(details, "podId") + delete(details, "error") delete(details, "containerExitCode") return &libpodEvents.Event{ - ContainerExitCode: exitCode, + ContainerExitCode: &exitCode, ID: e.Actor.ID, Image: image, Name: name, + Network: network, Status: status, Time: time.Unix(0, e.TimeNano), Type: t, HealthStatus: e.HealthStatus, + Error: errorString, Details: libpodEvents.Details{ PodID: podID, Attributes: details, @@ -55,22 +60,30 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event { } // ConvertToEntitiesEvent converts a libpod event to an entities one. -func ConvertToEntitiesEvent(e libpodEvents.Event) *Event { +func ConvertToEntitiesEvent(e libpodEvents.Event) *types.Event { attributes := e.Details.Attributes if attributes == nil { attributes = make(map[string]string) } attributes["image"] = e.Image attributes["name"] = e.Name - attributes["containerExitCode"] = strconv.Itoa(e.ContainerExitCode) + if e.ContainerExitCode != nil { + attributes["containerExitCode"] = strconv.Itoa(*e.ContainerExitCode) + } attributes["podId"] = e.PodID + if e.Network != "" { + attributes["network"] = e.Network + } + if e.Error != "" { + attributes["error"] = e.Error + } message := dockerEvents.Message{ // Compatibility with clients that still look for deprecated API elements Status: e.Status.String(), ID: e.ID, From: e.Image, - Type: e.Type.String(), - Action: e.Status.String(), + Type: dockerEvents.Type(e.Type.String()), + Action: dockerEvents.Action(e.Status.String()), Actor: dockerEvents.Actor{ ID: e.ID, Attributes: attributes, @@ -79,8 +92,8 @@ func ConvertToEntitiesEvent(e libpodEvents.Event) *Event { Time: e.Time.Unix(), TimeNano: e.Time.UnixNano(), } - return &Event{ - message, - e.HealthStatus, + return &types.Event{ + Message: message, + HealthStatus: e.HealthStatus, } } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/filters.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/filters.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/filters.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/filters.go diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/generate.go similarity index 78% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/generate.go index e67c5bb8d..8a54ce290 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go +++ 
b/vendor/github.com/containers/podman/v5/pkg/domain/entities/generate.go @@ -1,6 +1,8 @@ package entities -import "io" +import ( + "github.com/containers/podman/v5/pkg/domain/entities/types" +) // GenerateSystemdOptions control the generation of systemd unit files. type GenerateSystemdOptions struct { @@ -22,10 +24,7 @@ type GenerateSystemdOptions struct { } // GenerateSystemdReport -type GenerateSystemdReport struct { - // Units of the generate process. key = unit name -> value = unit content - Units map[string]string -} +type GenerateSystemdReport = types.GenerateSystemdReport // GenerateKubeOptions control the generation of Kubernetes YAML files. type GenerateKubeOptions struct { @@ -44,16 +43,9 @@ type GenerateKubeOptions struct { type KubeGenerateOptions = GenerateKubeOptions // GenerateKubeReport -// -// FIXME: Podman4.0 should change io.Reader to io.ReaderCloser -type GenerateKubeReport struct { - // Reader - the io.Reader to reader the generated YAML file. - Reader io.Reader -} +type GenerateKubeReport = types.GenerateKubeReport -type GenerateSpecReport struct { - Data []byte -} +type GenerateSpecReport = types.GenerateSpecReport type GenerateSpecOptions struct { ID string diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/healthcheck.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/healthcheck.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/healthcheck.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/healthcheck.go diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/images.go similarity index 78% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/images.go index 45cebb55f..f70663d9f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/images.go @@ -3,15 +3,13 @@ package entities import ( "io" "net/url" - "time" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/signature/signer" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" - "github.com/containers/podman/v4/pkg/inspect" - "github.com/containers/podman/v4/pkg/trust" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" "github.com/docker/docker/api/types/container" "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -54,37 +52,7 @@ func (i *Image) Id() string { //nolint:revive,stylecheck } // swagger:model LibpodImageSummary -type ImageSummary struct { - ID string `json:"Id"` - ParentId string //nolint:revive,stylecheck - RepoTags []string - RepoDigests []string - Created int64 - Size int64 - SharedSize int - VirtualSize int64 - Labels map[string]string - Containers int - ReadOnly bool `json:",omitempty"` - Dangling bool `json:",omitempty"` - - // Podman extensions - Names []string `json:",omitempty"` - Digest string `json:",omitempty"` - History []string `json:",omitempty"` -} - -func (i *ImageSummary) Id() string { //nolint:revive,stylecheck - return i.ID -} - -func (i *ImageSummary) IsReadOnly() bool { - return i.ReadOnly -} - -func (i *ImageSummary) IsDangling() bool { - return i.Dangling -} +type ImageSummary = entitiesTypes.ImageSummary // ImageRemoveOptions can be used 
to alter image removal. type ImageRemoveOptions struct { @@ -102,30 +70,12 @@ type ImageRemoveOptions struct { // ImageRemoveReport is the response for removing one or more image(s) from storage // and images what was untagged vs actually removed. -type ImageRemoveReport struct { - // Deleted images. - Deleted []string `json:",omitempty"` - // Untagged images. Can be longer than Deleted. - Untagged []string `json:",omitempty"` - // ExitCode describes the exit codes as described in the `podman rmi` - // man page. - ExitCode int -} +type ImageRemoveReport = entitiesTypes.ImageRemoveReport type ImageHistoryOptions struct{} -type ImageHistoryLayer struct { - ID string `json:"id"` - Created time.Time `json:"created,omitempty"` - CreatedBy string `json:",omitempty"` - Tags []string `json:"tags,omitempty"` - Size int64 `json:"size"` - Comment string `json:"comment,omitempty"` -} - -type ImageHistoryReport struct { - Layers []ImageHistoryLayer -} +type ImageHistoryLayer = entitiesTypes.ImageHistoryLayer +type ImageHistoryReport = entitiesTypes.ImageHistoryReport // ImagePullOptions are the arguments for pulling images. type ImagePullOptions struct { @@ -152,6 +102,10 @@ type ImagePullOptions struct { // Quiet can be specified to suppress pull progress when pulling. Ignored // for remote calls. Quiet bool + // Retry number of times to retry pull in case of failure + Retry *uint + // RetryDelay between retries in case of pull failures + RetryDelay string // SignaturePolicy to use when pulling. Ignored for remote calls. SignaturePolicy string // SkipTLSVerify to skip HTTPS and certificate verification. @@ -166,16 +120,7 @@ type ImagePullOptions struct { } // ImagePullReport is the response from pulling one or more images. -type ImagePullReport struct { - // Stream used to provide output from c/image - Stream string `json:"stream,omitempty"` - // Error contains text of errors from c/image - Error string `json:"error,omitempty"` - // Images contains the ID's of the images pulled - Images []string `json:"images,omitempty"` - // ID contains image id (retained for backwards compatibility) - ID string `json:"id,omitempty"` -} +type ImagePullReport = entitiesTypes.ImagePullReport // ImagePushOptions are the arguments for pushing images. type ImagePushOptions struct { @@ -206,6 +151,10 @@ type ImagePushOptions struct { // RemoveSignatures, discard any pre-existing signatures in the image. // Ignored for remote calls. RemoveSignatures bool + // Retry number of times to retry push in case of failure + Retry *uint + // RetryDelay between retries in case of push failures + RetryDelay string // SignaturePolicy to use when pulling. Ignored for remote calls. SignaturePolicy string // Signers, if non-empty, asks for signatures to be added during the copy @@ -261,14 +210,7 @@ type ImagePushReport struct { // ImagePushStream is the response from pushing an image. Only used in the // remote API. -type ImagePushStream struct { - // ManifestDigest is the digest of the manifest of the pushed image. - ManifestDigest string `json:"manifestdigest,omitempty"` - // Stream used to provide push progress - Stream string `json:"stream,omitempty"` - // Error contains text of errors from pushing - Error string `json:"error,omitempty"` -} +type ImagePushStream = entitiesTypes.ImagePushStream // ImageSearchOptions are the arguments for searching images. type ImageSearchOptions struct { @@ -296,22 +238,7 @@ type ImageSearchOptions struct { } // ImageSearchReport is the response from searching images. 
-type ImageSearchReport struct { - // Index is the image index (e.g., "docker.io" or "quay.io") - Index string - // Name is the canonical name of the image (e.g., "docker.io/library/alpine"). - Name string - // Description of the image. - Description string - // Stars is the number of stars of the image. - Stars int - // Official indicates if it's an official image. - Official string - // Automated indicates if the image was created by an automated build. - Automated string - // Tag is the repository tag - Tag string -} +type ImageSearchReport = entitiesTypes.ImageSearchReport // Image List Options type ImageListOptions struct { @@ -329,9 +256,7 @@ type ImageTagOptions struct{} type ImageUntagOptions struct{} // ImageInspectReport is the data when inspecting an image. -type ImageInspectReport struct { - *inspect.ImageData -} +type ImageInspectReport = entitiesTypes.ImageInspectReport type ImageLoadOptions struct { Input string @@ -339,9 +264,7 @@ type ImageLoadOptions struct { SignaturePolicy string } -type ImageLoadReport struct { - Names []string -} +type ImageLoadReport = entitiesTypes.ImageLoadReport type ImageImportOptions struct { Architecture string @@ -356,9 +279,7 @@ type ImageImportOptions struct { SourceIsURL bool } -type ImageImportReport struct { - Id string //nolint:revive,stylecheck -} +type ImageImportReport = entitiesTypes.ImageImportReport // ImageSaveOptions provide options for saving images. type ImageSaveOptions struct { @@ -413,9 +334,7 @@ type ImageTreeOptions struct { } // ImageTreeReport provides results from ImageEngine.Tree() -type ImageTreeReport struct { - Tree string // TODO: Refactor move presentation work out of server -} +type ImageTreeReport = entitiesTypes.ImageTreeReport // ShowTrustOptions are the cli options for showing trust type ShowTrustOptions struct { @@ -426,12 +345,7 @@ type ShowTrustOptions struct { } // ShowTrustReport describes the results of show trust -type ShowTrustReport struct { - Raw []byte - SystemRegistriesDirPath string - JSONOutput []byte - Policies []*trust.Policy -} +type ShowTrustReport = entitiesTypes.ShowTrustReport // SetTrustOptions describes the CLI options for setting trust type SetTrustOptions struct { @@ -466,18 +380,10 @@ type ImageUnmountOptions struct { } // ImageMountReport describes the response from image mount -type ImageMountReport struct { - Id string //nolint:revive,stylecheck - Name string - Repositories []string - Path string -} +type ImageMountReport = entitiesTypes.ImageMountReport // ImageUnmountReport describes the response from umounting an image -type ImageUnmountReport struct { - Err error - Id string //nolint:revive,stylecheck -} +type ImageUnmountReport = entitiesTypes.ImageUnmountReport const ( LocalFarmImageBuilderName = "(local)" @@ -485,10 +391,4 @@ const ( ) // FarmInspectReport describes the response from farm inspect -type FarmInspectReport struct { - NativePlatforms []string - EmulatedPlatforms []string - OS string - Arch string - Variant string -} +type FarmInspectReport = entitiesTypes.FarmInspectReport diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/machine.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/machine.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/machine.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/machine.go index bd8be9779..74c025db5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/machine.go +++ 
b/vendor/github.com/containers/podman/v5/pkg/domain/entities/machine.go @@ -1,6 +1,6 @@ package entities -import "github.com/containers/podman/v4/libpod/define" +import "github.com/containers/podman/v5/libpod/define" type ListReporter struct { Name string diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/manifest.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/manifest.go new file mode 100644 index 000000000..7e2a5c46d --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/manifest.go @@ -0,0 +1,138 @@ +package entities + +import ( + "github.com/containers/image/v5/types" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" +) + +// ManifestCreateOptions provides model for creating manifest list or image index +type ManifestCreateOptions struct { + // True when adding lists to include all images + All bool `schema:"all"` + // Amend an extant list if there's already one with the desired name + Amend bool `schema:"amend"` + // Should TLS registry certificate be verified? + SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` + // Annotations to set on the list, which forces it to be OCI format + Annotations map[string]string `json:"annotations" schema:"annotations"` +} + +// ManifestInspectOptions provides model for inspecting manifest +type ManifestInspectOptions struct { + // Path to an authentication file. + Authfile string `json:"-" schema:"-"` + // Should TLS registry certificate be verified? + SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` +} + +// ManifestAddOptions provides model for adding digests to manifest list +// +// swagger:model +type ManifestAddOptions struct { + ManifestAnnotateOptions + // True when operating on a list to include all images + All bool `json:"all" schema:"all"` + // authfile to use when pushing manifest list + Authfile string `json:"-" schema:"-"` + // Home directory for certificates when pushing a manifest list + CertDir string `json:"-" schema:"-"` + // Password to authenticate to registry when pushing manifest list + Password string `json:"-" schema:"-"` + // Should TLS registry certificate be verified? + SkipTLSVerify types.OptionalBool `json:"-" schema:"-"` + // Username to authenticate to registry when pushing manifest list + Username string `json:"-" schema:"-"` + // Images is an optional list of image references to add to manifest list + Images []string `json:"images" schema:"images"` +} + +// ManifestAddArtifactOptions provides the model for creating artifact manifests +// for files and adding those manifests to a manifest list +// +// swagger:model +type ManifestAddArtifactOptions struct { + ManifestAnnotateOptions + // Note to future maintainers: keep these fields synchronized with ManifestModifyOptions! 
+ Type *string `json:"artifact_type" schema:"artifact_type"` + LayerType string `json:"artifact_layer_type" schema:"artifact_layer_type"` + ConfigType string `json:"artifact_config_type" schema:"artifact_config_type"` + Config string `json:"artifact_config" schema:"artifact_config"` + ExcludeTitles bool `json:"artifact_exclude_titles" schema:"artifact_exclude_titles"` + Annotations map[string]string `json:"artifact_annotations" schema:"artifact_annotations"` + Subject string `json:"artifact_subject" schema:"artifact_subject"` + Files []string `json:"artifact_files" schema:"-"` +} + +// ManifestAnnotateOptions provides model for annotating manifest list +type ManifestAnnotateOptions struct { + // Annotation to add to the item in the manifest list + Annotation []string `json:"annotation" schema:"annotation"` + // Annotations to add to the item in the manifest list by a map which is preferred over Annotation + Annotations map[string]string `json:"annotations" schema:"annotations"` + // Arch overrides the architecture for the item in the manifest list + Arch string `json:"arch" schema:"arch"` + // Feature list for the item in the manifest list + Features []string `json:"features" schema:"features"` + // OS overrides the operating system for the item in the manifest list + OS string `json:"os" schema:"os"` + // OS features for the item in the manifest list + OSFeatures []string `json:"os_features" schema:"os_features"` + // OSVersion overrides the operating system for the item in the manifest list + OSVersion string `json:"os_version" schema:"os_version"` + // Variant for the item in the manifest list + Variant string `json:"variant" schema:"variant"` + // IndexAnnotation is a slice of key=value annotations to add to the manifest list itself + IndexAnnotation []string `json:"index_annotation" schema:"annotation"` + // IndexAnnotations is a map of key:value annotations to add to the manifest list itself, by a map which is preferred over IndexAnnotation + IndexAnnotations map[string]string `json:"index_annotations" schema:"annotations"` + // IndexSubject is a subject value to set in the manifest list itself + IndexSubject string `json:"subject" schema:"subject"` +} + +// ManifestModifyOptions provides the model for mutating a manifest +// +// swagger 2.0 does not support oneOf for schema validation. +// +// Operation "update" uses all fields. +// Operation "remove" uses fields: Operation and Images +// Operation "annotate" uses fields: Operation and Annotations +// +// swagger:model +type ManifestModifyOptions struct { + Operation string `json:"operation" schema:"operation"` // Valid values: update, remove, annotate + ManifestAddOptions + ManifestRemoveOptions + // The following are all of the fields from ManifestAddArtifactOptions. + // We can't just embed the whole structure because it embeds a + // ManifestAnnotateOptions, which would conflict with the one that + // ManifestAddOptions embeds. 
+ ArtifactType *string `json:"artifact_type" schema:"artifact_type"` + ArtifactLayerType string `json:"artifact_layer_type" schema:"artifact_layer_type"` + ArtifactConfigType string `json:"artifact_config_type" schema:"artifact_config_type"` + ArtifactConfig string `json:"artifact_config" schema:"artifact_config"` + ArtifactExcludeTitles bool `json:"artifact_exclude_titles" schema:"artifact_exclude_titles"` + ArtifactAnnotations map[string]string `json:"artifact_annotations" schema:"artifact_annotations"` + ArtifactSubject string `json:"artifact_subject" schema:"artifact_subject"` + ArtifactFiles []string `json:"artifact_files" schema:"-"` +} + +// ManifestPushReport provides the model for the pushed manifest +// +// swagger:model +type ManifestPushReport = entitiesTypes.ManifestPushReport + +// ManifestRemoveOptions provides the model for removing digests from a manifest +// +// swagger:model +type ManifestRemoveOptions struct { +} + +// ManifestRemoveReport provides the model for the removed manifest +// +// swagger:model +type ManifestRemoveReport = entitiesTypes.ManifestRemoveReport + +// ManifestModifyReport provides the model for removed digests and changed manifest +// +// swagger:model +type ManifestModifyReport = entitiesTypes.ManifestModifyReport diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/network.go similarity index 80% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/network.go index 66420b25e..1edc335c3 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/network.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/network.go @@ -3,7 +3,7 @@ package entities import ( "net" - "github.com/containers/common/libnetwork/types" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" ) // NetworkListOptions describes options for listing networks in cli @@ -21,11 +21,7 @@ type NetworkReloadOptions struct { } // NetworkReloadReport describes the results of reloading a container network. 
-type NetworkReloadReport struct { - //nolint:stylecheck,revive - Id string - Err error -} +type NetworkReloadReport = entitiesTypes.NetworkReloadReport // NetworkRmOptions describes options for removing networks type NetworkRmOptions struct { @@ -34,10 +30,7 @@ type NetworkRmOptions struct { } // NetworkRmReport describes the results of network removal -type NetworkRmReport struct { - Name string - Err error -} +type NetworkRmReport = entitiesTypes.NetworkRmReport // NetworkCreateOptions describes options to create a network type NetworkCreateOptions struct { @@ -67,9 +60,7 @@ type NetworkUpdateOptions struct { } // NetworkCreateReport describes a created network for the cli -type NetworkCreateReport struct { - Name string -} +type NetworkCreateReport = entitiesTypes.NetworkCreateReport // NetworkDisconnectOptions describes options for disconnecting // containers from networks @@ -80,20 +71,17 @@ type NetworkDisconnectOptions struct { // NetworkConnectOptions describes options for connecting // a container to a network -type NetworkConnectOptions struct { - Container string `json:"container"` - types.PerNetworkOptions -} +type NetworkConnectOptions = entitiesTypes.NetworkConnectOptions // NetworkPruneReport containers the name of network and an error // associated in its pruning (removal) // swagger:model NetworkPruneReport -type NetworkPruneReport struct { - Name string - Error error -} +type NetworkPruneReport = entitiesTypes.NetworkPruneReport // NetworkPruneOptions describes options for pruning unused networks type NetworkPruneOptions struct { Filters map[string][]string } + +type NetworkInspectReport = entitiesTypes.NetworkInspectReport +type NetworkContainerInfo = entitiesTypes.NetworkContainerInfo diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/play.go similarity index 72% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/play.go index 579fe8b7d..eaec3690b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/play.go @@ -4,6 +4,7 @@ import ( "net" "github.com/containers/image/v5/types" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" ) // PlayKubeOptions controls playing kube YAML files. @@ -75,45 +76,19 @@ type PlayKubeOptions struct { PublishAllPorts bool // Wait - indicates whether to return after having created the pods Wait bool + // SystemContext - used when building the image + SystemContext *types.SystemContext } // PlayKubePod represents a single pod and associated containers created by play kube -type PlayKubePod struct { - // ID - ID of the pod created as a result of play kube. - ID string - // Containers - the IDs of the containers running in the created pod. - Containers []string - // InitContainers - the IDs of the init containers to be run in the created pod. - InitContainers []string - // Logs - non-fatal errors and log messages while processing. - Logs []string - // ContainerErrors - any errors that occurred while starting containers - // in the pod. - ContainerErrors []string -} +type PlayKubePod = entitiesTypes.PlayKubePod // PlayKubeVolume represents a single volume created by play kube. -type PlayKubeVolume struct { - // Name - Name of the volume created by play kube. 
- Name string -} +type PlayKubeVolume entitiesTypes.PlayKubeVolume // PlayKubeReport contains the results of running play kube. -type PlayKubeReport struct { - // Pods - pods created by play kube. - Pods []PlayKubePod - // Volumes - volumes created by play kube. - Volumes []PlayKubeVolume - PlayKubeTeardown - // Secrets - secrets created by play kube - Secrets []PlaySecret - // ServiceContainerID - ID of the service container if one is created - ServiceContainerID string - // If set, exit with the specified exit code. - ExitCode *int32 -} - -type KubePlayReport = PlayKubeReport +type PlayKubeReport = entitiesTypes.PlayKubeReport +type KubePlayReport = entitiesTypes.KubePlayReport // PlayKubeDownOptions are options for tearing down pods type PlayKubeDownOptions struct { @@ -122,13 +97,6 @@ type PlayKubeDownOptions struct { } // PlayKubeDownReport contains the results of tearing down play kube -type PlayKubeTeardown struct { - StopReport []*PodStopReport - RmReport []*PodRmReport - VolumeRmReport []*VolumeRmReport - SecretRmReport []*SecretRmReport -} +type PlayKubeTeardown = entitiesTypes.PlayKubeTeardown -type PlaySecret struct { - CreateReport *SecretCreateReport -} +type PlaySecret = entitiesTypes.PlaySecret diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/pods.go similarity index 83% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/pods.go index 0baff93a6..63b88e335 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/pods.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/pods.go @@ -3,12 +3,11 @@ package entities import ( "errors" "strings" - "time" commonFlag "github.com/containers/common/pkg/flag" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -18,51 +17,25 @@ type PodKillOptions struct { Signal string } -type PodKillReport struct { - Errs []error - Id string //nolint:revive,stylecheck -} +type PodKillReport = types.PodKillReport -type ListPodsReport struct { - Cgroup string - Containers []*ListPodContainer - Created time.Time - Id string //nolint:revive,stylecheck - InfraId string //nolint:revive,stylecheck - Name string - Namespace string - // Network names connected to infra container - Networks []string - Status string - Labels map[string]string -} +type ListPodsReport = types.ListPodsReport -type ListPodContainer struct { - Id string //nolint:revive,stylecheck - Names string - Status string - RestartCount uint -} +type ListPodContainer = types.ListPodContainer type PodPauseOptions struct { All bool Latest bool } -type PodPauseReport struct { - Errs []error - Id string //nolint:revive,stylecheck -} +type PodPauseReport = types.PodPauseReport type PodunpauseOptions struct { All bool Latest bool } -type PodUnpauseReport struct { - Errs []error - Id string //nolint:revive,stylecheck -} +type PodUnpauseReport = types.PodUnpauseReport type PodStopOptions struct { All bool @@ -71,30 +44,20 @@ type PodStopOptions struct { Timeout int } -type PodStopReport struct { - Errs []error - Id string //nolint:revive,stylecheck -} +type PodStopReport = types.PodStopReport 
type PodRestartOptions struct { All bool Latest bool } -type PodRestartReport struct { - Errs []error - Id string //nolint:revive,stylecheck -} - +type PodRestartReport = types.PodRestartReport type PodStartOptions struct { All bool Latest bool } -type PodStartReport struct { - Errs []error - Id string //nolint:revive,stylecheck -} +type PodStartReport = types.PodStartReport type PodRmOptions struct { All bool @@ -104,17 +67,9 @@ type PodRmOptions struct { Timeout *uint } -type PodRmReport struct { - RemovedCtrs map[string]error - Err error - Id string //nolint:revive,stylecheck -} +type PodRmReport = types.PodRmReport -// PddSpec is an abstracted version of PodSpecGen designed to eventually accept options -// not meant to be in a specgen -type PodSpec struct { - PodSpecGen specgen.PodSpecGenerator -} +type PodSpec = types.PodSpec // PodCreateOptions provides all possible options for creating a pod and its infra container. // The JSON tags below are made to match the respective field in ContainerCreateOptions for the purpose of mapping. @@ -211,6 +166,7 @@ type ContainerCreateOptions struct { EnvFile []string Expose []string GIDMap []string + GPUs []string GroupAdd []string HealthCmd string HealthInterval string @@ -250,6 +206,7 @@ type ContainerCreateOptions struct { PodIDFile string Personality string PreserveFDs uint + PreserveFD []uint Privileged bool PublishAll bool Pull string @@ -259,6 +216,8 @@ type ContainerCreateOptions struct { Restart string Replace bool Requires []string + Retry *uint `json:"retry,omitempty"` + RetryDelay string `json:"retry_delay,omitempty"` Rm bool RootFS bool Secrets []string @@ -314,19 +273,15 @@ type ContainerCreateOptions struct { func NewInfraContainerCreateOptions() ContainerCreateOptions { options := ContainerCreateOptions{ IsInfra: true, - ImageVolume: define.TypeBind, + ImageVolume: "anonymous", MemorySwappiness: -1, } return options } -type PodCreateReport struct { - Id string //nolint:revive,stylecheck -} +type PodCreateReport = types.PodCreateReport -type PodCloneReport struct { - Id string //nolint:revive,stylecheck -} +type PodCloneReport = types.PodCloneReport func (p *PodCreateOptions) CPULimits() *specs.LinuxCPU { cpu := &specs.LinuxCPU{} @@ -445,10 +400,7 @@ type PodPruneOptions struct { Force bool `json:"force" schema:"force"` } -type PodPruneReport struct { - Err error - Id string //nolint:revive,stylecheck -} +type PodPruneReport = types.PodPruneReport type PodTopOptions struct { // CLI flags. @@ -472,9 +424,7 @@ type PodPSOptions struct { Sort string } -type PodInspectReport struct { - *define.InspectPodData -} +type PodInspectReport = types.PodInspectReport // PodStatsOptions are options for the pod stats command. type PodStatsOptions struct { @@ -485,35 +435,7 @@ type PodStatsOptions struct { } // PodStatsReport includes pod-resource statistics data. 
-type PodStatsReport struct { - // Percentage of CPU utilized by pod - // example: 75.5% - CPU string - // Humanized Memory usage and maximum - // example: 12mb / 24mb - MemUsage string - // Memory usage and maximum in bytes - // example: 1,000,000 / 4,000,000 - MemUsageBytes string - // Percentage of Memory utilized by pod - // example: 50.5% - Mem string - // Network usage inbound + outbound - NetIO string - // Humanized disk usage read + write - BlockIO string - // Container PID - PIDS string - // Pod ID - // example: 62310217a19e - Pod string - // Container ID - // example: e43534f89a7d - CID string - // Pod Name - // example: elastic_pascal - Name string -} +type PodStatsReport = types.PodStatsReport // ValidatePodStatsOptions validates the specified slice and options. Allows // for sharing code in the front- and the back-end. @@ -544,7 +466,7 @@ func ValidatePodStatsOptions(args []string, options *PodStatsOptions) error { // PodLogsOptionsToContainerLogsOptions converts PodLogOptions to ContainerLogOptions func PodLogsOptionsToContainerLogsOptions(options PodLogsOptions) ContainerLogsOptions { // PodLogsOptions are similar but contains few extra fields like ctrName - // So cast other values as is so we can re-use the code + // So cast other values as is so we can reuse the code containerLogsOpts := ContainerLogsOptions{ Details: options.Details, Latest: options.Latest, diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/reports/containers.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/containers.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/reports/containers.go diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/reports/prune.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/prune.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/reports/prune.go diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/scp.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/reports/scp.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/reports/scp.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/reports/scp.go diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/secrets.go similarity index 70% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/secrets.go index ce3fac8d8..cd35ab4cc 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/secrets.go @@ -1,14 +1,11 @@ package entities import ( - "time" - - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/errorhandling" ) -type SecretCreateReport struct { - ID string -} +type SecretCreateReport = types.SecretCreateReport type SecretCreateOptions struct { Driver string @@ -25,51 +22,24 @@ type SecretListRequest struct { Filters map[string][]string } -type SecretListReport struct { - ID string - Name string - Driver string 
- CreatedAt string - UpdatedAt string -} +type SecretListReport = types.SecretListReport type SecretRmOptions struct { All bool Ignore bool } -type SecretRmReport struct { - ID string - Err error -} +type SecretRmReport = types.SecretRmReport -type SecretInfoReport struct { - ID string - CreatedAt time.Time - UpdatedAt time.Time - Spec SecretSpec - SecretData string `json:"SecretData,omitempty"` -} +type SecretInfoReport = types.SecretInfoReport -type SecretInfoReportCompat struct { - SecretInfoReport - Version SecretVersion -} +type SecretInfoReportCompat = types.SecretInfoReportCompat -type SecretVersion struct { - Index int -} +type SecretVersion = types.SecretVersion -type SecretSpec struct { - Name string - Driver SecretDriverSpec - Labels map[string]string -} +type SecretSpec = types.SecretSpec -type SecretDriverSpec struct { - Name string - Options map[string]string -} +type SecretDriverSpec = types.SecretDriverSpec // swagger:model SecretCreate type SecretCreateRequest struct { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/set.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/set.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/set.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/set.go diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/system.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/system.go new file mode 100644 index 000000000..5d11f0a32 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/system.go @@ -0,0 +1,25 @@ +package entities + +import ( + "github.com/containers/podman/v5/pkg/domain/entities/types" +) + +// ServiceOptions provides the input for starting an API and sidecar pprof services +type ServiceOptions = types.ServiceOptions +type SystemPruneOptions = types.SystemPruneOptions +type SystemPruneReport = types.SystemPruneReport +type SystemMigrateOptions = types.SystemMigrateOptions +type SystemDfOptions = types.SystemDfOptions +type SystemDfReport = types.SystemDfReport +type SystemDfImageReport = types.SystemDfImageReport +type SystemDfContainerReport = types.SystemDfContainerReport +type SystemDfVolumeReport = types.SystemDfVolumeReport +type SystemVersionReport = types.SystemVersionReport +type SystemUnshareOptions = types.SystemUnshareOptions +type ComponentVersion = types.SystemComponentVersion +type ListRegistriesReport = types.ListRegistriesReport + +// swagger:model AuthConfig +type AuthConfig = types.AuthConfig +type AuthReport = types.AuthReport +type LocksReport = types.LocksReport diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types.go similarity index 77% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/types.go index 6cf354a83..52901c03f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types.go @@ -2,13 +2,12 @@ package entities import ( "net" - "os" - buildahDefine "github.com/containers/buildah/define" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v5/libpod/define" + 
"github.com/containers/podman/v5/libpod/events" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/specgen" "github.com/containers/storage/pkg/archive" dockerAPI "github.com/docker/docker/api/types" ) @@ -99,43 +98,16 @@ type EventsOptions struct { } // ContainerCreateResponse is the response struct for creating a container -type ContainerCreateResponse struct { - // ID of the container created - // required: true - ID string `json:"Id"` - // Warnings during container creation - // required: true - Warnings []string `json:"Warnings"` -} +type ContainerCreateResponse = entitiesTypes.ContainerCreateResponse // BuildOptions describe the options for building container images. -type BuildOptions struct { - buildahDefine.BuildOptions - ContainerFiles []string - FarmBuildOptions - // Files that need to be closed after the build - // so need to pass this to the main build functions - LogFileToClose *os.File - TmpDirToClose string -} +type BuildOptions = entitiesTypes.BuildOptions // BuildReport is the image-build report. -type BuildReport struct { - // ID of the image. - ID string - // Format to save the image in - SaveFormat string -} +type BuildReport = entitiesTypes.BuildReport // FarmBuildOptions describes the options for building container images on farm nodes -type FarmBuildOptions struct { - // Cleanup removes built images from farm nodes on success - Cleanup bool - // Authfile is the path to the file holding registry credentials - Authfile string - // SkipTLSVerify skips tls verification when set to true - SkipTLSVerify bool -} +type FarmBuildOptions = entitiesTypes.FarmBuildOptions type IDOrNameResponse struct { // The Id or Name of an object diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types/auth.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/auth.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/types/auth.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/types/auth.go diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/container_ps.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/container_ps.go new file mode 100644 index 000000000..73f70dbe4 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/container_ps.go @@ -0,0 +1,115 @@ +package types + +import ( + "time" + + netTypes "github.com/containers/common/libnetwork/types" + define "github.com/containers/podman/v5/pkg/ps/define" +) + +// ListContainer describes a container suitable for listing +type ListContainer struct { + // AutoRemove + AutoRemove bool + // Container command + Command []string + // Container creation time + Created time.Time + // Human-readable container creation time. + CreatedAt string + // CIDFile specified at creation time. + CIDFile string + // If container has exited/stopped + Exited bool + // Time container exited + ExitedAt int64 + // If container has exited, the return code from the command + ExitCode int32 + // The unique identifier for the container + ID string `json:"Id"` + // Container image + Image string + // Container image ID + ImageID string + // If this container is a Pod infra container + IsInfra bool + // Labels for container + Labels map[string]string + // User volume mounts + Mounts []string + // The names assigned to the container + Names []string + // Namespaces the container belongs to. 
Requires the + // namespace boolean to be true + Namespaces ListContainerNamespaces + // The network names assigned to the container + Networks []string + // The process id of the container + Pid int + // If the container is part of Pod, the Pod ID. Requires the pod + // boolean to be set + Pod string + // If the container is part of Pod, the Pod name. Requires the pod + // boolean to be set + PodName string + // Port mappings + Ports []netTypes.PortMapping + // Restarts is how many times the container was restarted by its + // restart policy. This is NOT incremented by normal container restarts + // (only by restart policy). + Restarts uint + // Size of the container rootfs. Requires the size boolean to be true + Size *define.ContainerSize + // Time when container started + StartedAt int64 + // State of container + State string + // Status is a human-readable approximation of a duration for json output + Status string +} + +// ListContainerNamespaces contains the identifiers of the container's Linux namespaces +type ListContainerNamespaces struct { + // Mount namespace + MNT string `json:"Mnt,omitempty"` + // Cgroup namespace + Cgroup string `json:"Cgroup,omitempty"` + // IPC namespace + IPC string `json:"Ipc,omitempty"` + // Network namespace + NET string `json:"Net,omitempty"` + // PID namespace + PIDNS string `json:"Pidns,omitempty"` + // UTS namespace + UTS string `json:"Uts,omitempty"` + // User namespace + User string `json:"User,omitempty"` +} + +func (l ListContainer) CGROUPNS() string { + return l.Namespaces.Cgroup +} + +func (l ListContainer) IPC() string { + return l.Namespaces.IPC +} + +func (l ListContainer) MNT() string { + return l.Namespaces.MNT +} + +func (l ListContainer) NET() string { + return l.Namespaces.NET +} + +func (l ListContainer) PIDNS() string { + return l.Namespaces.PIDNS +} + +func (l ListContainer) USERNS() string { + return l.Namespaces.User +} + +func (l ListContainer) UTS() string { + return l.Namespaces.UTS +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/containers.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/containers.go new file mode 100644 index 000000000..f9d922e22 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/containers.go @@ -0,0 +1,41 @@ +package types + +import ( + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/specgen" +) + +type ContainerCopyFunc func() error + +type ContainerStatReport struct { + define.FileInfo +} + +type CheckpointReport struct { + Err error `json:"-"` + Id string `json:"Id"` //nolint:revive,stylecheck + RawInput string `json:"-"` + RuntimeDuration int64 `json:"runtime_checkpoint_duration"` + CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"` +} + +type RestoreReport struct { + Err error `json:"-"` + Id string `json:"Id"` //nolint:revive,stylecheck + RawInput string `json:"-"` + RuntimeDuration int64 `json:"runtime_restore_duration"` + CRIUStatistics *define.CRIUCheckpointRestoreStatistics `json:"criu_statistics"` +} + +// ContainerStatsReport is used for streaming container stats. +type ContainerStatsReport struct { + // Error from reading stats. + Error error + // Results, set when there is no error. 
+ Stats []define.ContainerStats +} + +type ContainerUpdateOptions struct { + NameOrID string + Specgen *specgen.SpecGenerator +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/events.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/events.go new file mode 100644 index 000000000..a9e9dd6a1 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/events.go @@ -0,0 +1,14 @@ +package types + +import ( + dockerEvents "github.com/docker/docker/api/types/events" +) + +// Event combines various event-related data such as time, event type, status +// and more. +type Event struct { + // TODO: it would be nice to have full control over the types at some + // point and fork such Docker types. + dockerEvents.Message + HealthStatus string `json:",omitempty"` +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/generate.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/generate.go new file mode 100644 index 000000000..45d4241a6 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/generate.go @@ -0,0 +1,20 @@ +package types + +import ( + "io" +) + +type GenerateSystemdReport struct { + // Units of the generate process. key = unit name -> value = unit content + Units map[string]string +} + +type GenerateKubeReport struct { + // FIXME: Podman4.0 should change io.Reader to io.ReadCloser + // Reader - the io.Reader to read the generated YAML file. + Reader io.Reader +} + +type GenerateSpecReport struct { + Data []byte +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/images.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/images.go new file mode 100644 index 000000000..1a63d5d80 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/images.go @@ -0,0 +1,151 @@ +package types + +import ( + "time" + + "github.com/containers/podman/v5/pkg/inspect" + "github.com/containers/podman/v5/pkg/trust" +) + +// swagger:model LibpodImageSummary +type ImageSummary struct { + ID string `json:"Id"` + ParentId string //nolint:revive,stylecheck + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + SharedSize int + VirtualSize int64 + Labels map[string]string + Containers int + ReadOnly bool `json:",omitempty"` + Dangling bool `json:",omitempty"` + + // Podman extensions + Names []string `json:",omitempty"` + Digest string `json:",omitempty"` + History []string `json:",omitempty"` +} + +func (i *ImageSummary) Id() string { //nolint:revive,stylecheck + return i.ID +} + +func (i *ImageSummary) IsReadOnly() bool { + return i.ReadOnly +} + +func (i *ImageSummary) IsDangling() bool { + return i.Dangling +} + +type ImageInspectReport struct { + *inspect.ImageData +} + +type ImageTreeReport struct { + Tree string // TODO: Refactor move presentation work out of server +} + +type ImageLoadReport struct { + Names []string +} + +type ImageImportReport struct { + Id string //nolint:revive,stylecheck +} + +// ImageSearchReport is the response from searching images. +type ImageSearchReport struct { + // Index is the image index (e.g., "docker.io" or "quay.io") + Index string + // Name is the canonical name of the image (e.g., "docker.io/library/alpine"). + Name string + // Description of the image. + Description string + // Stars is the number of stars of the image. + Stars int + // Official indicates if it's an official image.
+ Official string + // Automated indicates if the image was created by an automated build. + Automated string + // Tag is the repository tag + Tag string +} + +// ShowTrustReport describes the results of show trust +type ShowTrustReport struct { + Raw []byte + SystemRegistriesDirPath string + JSONOutput []byte + Policies []*trust.Policy +} + +// ImageMountReport describes the response from image mount +type ImageMountReport struct { + Id string //nolint:revive,stylecheck + Name string + Repositories []string + Path string +} + +// ImageUnmountReport describes the response from umounting an image +type ImageUnmountReport struct { + Err error + Id string //nolint:revive,stylecheck +} + +// FarmInspectReport describes the response from farm inspect +type FarmInspectReport struct { + NativePlatforms []string + EmulatedPlatforms []string + OS string + Arch string + Variant string +} + +// ImageRemoveReport is the response for removing one or more image(s) from storage, +// noting which images were untagged vs actually removed. +type ImageRemoveReport struct { + // Deleted images. + Deleted []string `json:",omitempty"` + // Untagged images. Can be longer than Deleted. + Untagged []string `json:",omitempty"` + // ExitCode describes the exit codes as described in the `podman rmi` + // man page. + ExitCode int +} + +type ImageHistoryLayer struct { + ID string `json:"id"` + Created time.Time `json:"created,omitempty"` + CreatedBy string `json:",omitempty"` + Tags []string `json:"tags,omitempty"` + Size int64 `json:"size"` + Comment string `json:"comment,omitempty"` +} + +type ImageHistoryReport struct { + Layers []ImageHistoryLayer +} + +type ImagePullReport struct { + // Stream used to provide output from c/image + Stream string `json:"stream,omitempty"` + // Error contains text of errors from c/image + Error string `json:"error,omitempty"` + // Images contains the IDs of the images pulled + Images []string `json:"images,omitempty"` + // ID contains image id (retained for backwards compatibility) + ID string `json:"id,omitempty"` +} + +type ImagePushStream struct { + // ManifestDigest is the digest of the manifest of the pushed image. + ManifestDigest string `json:"manifestdigest,omitempty"` + // Stream used to provide push progress + Stream string `json:"stream,omitempty"` + // Error contains text of errors from pushing + Error string `json:"error,omitempty"` +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/manifest.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/manifest.go new file mode 100644 index 000000000..493950bc7 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/manifest.go @@ -0,0 +1,36 @@ +package types + +// swagger:model +type ManifestPushReport struct { + // ID of the pushed manifest + ID string `json:"Id"` + // Stream used to provide push progress + Stream string `json:"stream,omitempty"` + // Error contains text of errors from pushing + Error string `json:"error,omitempty"` +} + +// swagger:model +type ManifestModifyReport struct { + // Manifest List ID + ID string `json:"Id"` + // Images added to or removed from manifest list, otherwise not provided. + Images []string `json:"images,omitempty" schema:"images"` + // Files added to manifest list, otherwise not provided. + Files []string `json:"files,omitempty" schema:"files"` + // Errors associated with operation + Errors []error `json:"errors,omitempty"` +} + +// swagger:model +type ManifestRemoveReport struct { + // Deleted manifest list.
+ Deleted []string `json:",omitempty"` + // Untagged images. Can be longer than Deleted. + Untagged []string `json:",omitempty"` + // Errors associated with operation + Errors []string `json:",omitempty"` + // ExitCode describes the exit codes as described in the `podman rmi` + // man page. + ExitCode int +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/network.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/network.go new file mode 100644 index 000000000..ae233b823 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/network.go @@ -0,0 +1,51 @@ +package types + +import ( + commonTypes "github.com/containers/common/libnetwork/types" +) + +// NetworkPruneReport contains the name of the network and any error +// associated with its pruning (removal) +// swagger:model NetworkPruneReport +type NetworkPruneReport struct { + Name string + Error error +} + +// NetworkReloadReport describes the results of reloading a container network. +type NetworkReloadReport struct { + //nolint:stylecheck,revive + Id string + Err error +} + +// NetworkConnectOptions describes options for connecting +// a container to a network +type NetworkConnectOptions struct { + Container string `json:"container"` + commonTypes.PerNetworkOptions +} + +// NetworkRmReport describes the results of network removal +type NetworkRmReport struct { + Name string + Err error +} + +type NetworkCreateReport struct { + Name string +} + +type NetworkInspectReport struct { + commonTypes.Network + + Containers map[string]NetworkContainerInfo `json:"containers"` +} + +type NetworkContainerInfo struct { + // Name of the container + Name string `json:"name"` + + // Interfaces configured for this container with their addresses + Interfaces map[string]commonTypes.NetInterface `json:"interfaces,omitempty"` +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/play.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/play.go new file mode 100644 index 000000000..7f744106c --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/play.go @@ -0,0 +1,48 @@ +package types + +type PlayKubePod struct { + // ID - ID of the pod created as a result of play kube. + ID string + // Containers - the IDs of the containers running in the created pod. + Containers []string + // InitContainers - the IDs of the init containers to be run in the created pod. + InitContainers []string + // Logs - non-fatal errors and log messages while processing. + Logs []string + // ContainerErrors - any errors that occurred while starting containers + // in the pod. + ContainerErrors []string +} + +type PlayKubeVolume struct { + // Name - Name of the volume created by play kube. + Name string +} + +type PlayKubeReport struct { + // Pods - pods created by play kube. + Pods []PlayKubePod + // Volumes - volumes created by play kube. + Volumes []PlayKubeVolume + PlayKubeTeardown + // Secrets - secrets created by play kube + Secrets []PlaySecret + // ServiceContainerID - ID of the service container if one is created + ServiceContainerID string + // If set, exit with the specified exit code.
+ ExitCode *int32 +} + +type KubePlayReport = PlayKubeReport + +// PlayKubeTeardown contains the results of tearing down play kube +type PlayKubeTeardown struct { + StopReport []*PodStopReport + RmReport []*PodRmReport + VolumeRmReport []*VolumeRmReport + SecretRmReport []*SecretRmReport +} + +type PlaySecret struct { + CreateReport *SecretCreateReport +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/pods.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/pods.go new file mode 100644 index 000000000..4a4255800 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/pods.go @@ -0,0 +1,118 @@ +package types + +import ( + "time" + + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/specgen" +) + +type PodPruneReport struct { + Err error + Id string //nolint:revive,stylecheck +} + +type PodPauseReport struct { + Errs []error + Id string //nolint:revive,stylecheck +} +type PodUnpauseReport struct { + Errs []error + Id string //nolint:revive,stylecheck +} + +type PodStopReport struct { + Errs []error + Id string //nolint:revive,stylecheck +} + +type PodRestartReport struct { + Errs []error + Id string //nolint:revive,stylecheck +} + +type PodStartReport struct { + Errs []error + Id string //nolint:revive,stylecheck +} + +type PodRmReport struct { + RemovedCtrs map[string]error + Err error + Id string //nolint:revive,stylecheck +} + +type PodCreateReport struct { + Id string //nolint:revive,stylecheck +} + +type PodCloneReport struct { + Id string //nolint:revive,stylecheck +} + +// PodStatsReport includes pod-resource statistics data. +type PodStatsReport struct { + // Percentage of CPU utilized by pod + // example: 75.5% + CPU string + // Humanized Memory usage and maximum + // example: 12mb / 24mb + MemUsage string + // Memory usage and maximum in bytes + // example: 1,000,000 / 4,000,000 + MemUsageBytes string + // Percentage of Memory utilized by pod + // example: 50.5% + Mem string + // Network usage inbound + outbound + NetIO string + // Humanized disk usage read + write + BlockIO string + // Container PID + PIDS string + // Pod ID + // example: 62310217a19e + Pod string + // Container ID + // example: e43534f89a7d + CID string + // Pod Name + // example: elastic_pascal + Name string +} + +// PodSpec is an abstracted version of PodSpecGen designed to eventually accept options +// not meant to be in a specgen +type PodSpec struct { + PodSpecGen specgen.PodSpecGenerator +} + +type PodInspectReport struct { + *define.InspectPodData +} + +type PodKillReport struct { + Errs []error + Id string //nolint:revive,stylecheck +} + +type ListPodsReport struct { + Cgroup string + Containers []*ListPodContainer + Created time.Time + Id string //nolint:revive,stylecheck + InfraId string //nolint:revive,stylecheck + Name string + Namespace string + // Network names connected to infra container + Networks []string + Status string + Labels map[string]string +} + +type ListPodContainer struct { + Id string //nolint:revive,stylecheck + Names string + Status string + RestartCount uint +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/secrets.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/secrets.go new file mode 100644 index 000000000..0ba544db7 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/secrets.go @@ -0,0 +1,50 @@ +package types + +import ( + "time" +) + +type SecretSpec struct { + Name string +
Driver SecretDriverSpec + Labels map[string]string +} + +type SecretVersion struct { + Index int +} + +type SecretDriverSpec struct { + Name string + Options map[string]string +} + +type SecretCreateReport struct { + ID string +} + +type SecretListReport struct { + ID string + Name string + Driver string + CreatedAt string + UpdatedAt string +} + +type SecretRmReport struct { + ID string + Err error +} + +type SecretInfoReport struct { + ID string + CreatedAt time.Time + UpdatedAt time.Time + Spec SecretSpec + SecretData string `json:"SecretData,omitempty"` +} + +type SecretInfoReportCompat struct { + SecretInfoReport + Version SecretVersion +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/system.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/types/system.go index 473db3530..3d1361ef0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/system.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/system.go @@ -1,11 +1,10 @@ -package entities +package types import ( "time" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities/reports" - "github.com/containers/podman/v4/pkg/domain/entities/types" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities/reports" ) // ServiceOptions provides the input for starting an API and sidecar pprof services @@ -100,21 +99,12 @@ type SystemUnshareOptions struct { RootlessNetNS bool } -type ComponentVersion struct { - types.Version -} - // ListRegistriesReport is the report when querying for a sorted list of // registries which may be contacted during certain operations. type ListRegistriesReport struct { Registries []string } -// swagger:model AuthConfig -type AuthConfig struct { - types.AuthConfig -} - // AuthReport describes the response for authentication check type AuthReport struct { IdentityToken string diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/types.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/types.go new file mode 100644 index 000000000..6530402f3 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/types.go @@ -0,0 +1,79 @@ +// copied from github.com/docker/docker/api/types +package types + +import ( + "os" + + buildahDefine "github.com/containers/buildah/define" +) + +// ComponentVersion describes the version information for a specific component. 
+type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated; they relate to the Engine component and are kept for backwards compatibility + + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// SystemComponentVersion is the type used by pkg/domain/entities +type SystemComponentVersion struct { + Version +} + +// ContainerCreateResponse is the response struct for creating a container +type ContainerCreateResponse struct { + // ID of the container created + // required: true + ID string `json:"Id"` + // Warnings during container creation + // required: true + Warnings []string `json:"Warnings"` +} + +// FarmBuildOptions describes the options for building container images on farm nodes +type FarmBuildOptions struct { + // Cleanup removes built images from farm nodes on success + Cleanup bool + // Authfile is the path to the file holding registry credentials + Authfile string + // SkipTLSVerify skips tls verification when set to true + SkipTLSVerify *bool +} + +// BuildOptions describes the options for building container images. +type BuildOptions struct { + buildahDefine.BuildOptions + ContainerFiles []string + FarmBuildOptions + // Files that need to be closed after the build, + // so they need to be passed to the main build functions + LogFileToClose *os.File + TmpDirToClose string +} + +// BuildReport is the image-build report. +type BuildReport struct { + // ID of the image. + ID string + // Format to save the image in + SaveFormat string +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/volumes.go similarity index 60% rename from vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go rename to vendor/github.com/containers/podman/v5/pkg/domain/entities/types/volumes.go index 4acb7fc77..3a47d9d7b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/types/volumes.go @@ -1,12 +1,9 @@ -package entities +package types import ( - "net/url" - - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" ) -// VolumeCreateOptions provides details for creating volumes // swagger:model type VolumeCreateOptions struct { // New volume's name.
Can be left blank @@ -23,50 +20,22 @@ type VolumeCreateOptions struct { IgnoreIfExists bool `schema:"ignoreIfExist"` } -type VolumeConfigResponse struct { - define.InspectVolumeData -} - -type VolumeRmOptions struct { - All bool - Force bool - Ignore bool - Timeout *uint -} - type VolumeRmReport struct { Err error Id string //nolint:revive,stylecheck } - type VolumeInspectReport struct { *VolumeConfigResponse } -// VolumePruneOptions describes the options needed -// to prune a volume from the CLI -type VolumePruneOptions struct { - Filters url.Values `json:"filters" schema:"filters"` -} - -type VolumeListOptions struct { - Filter map[string][]string -} - type VolumeListReport struct { VolumeConfigResponse } -// VolumeReloadReport describes the response from reload volume plugins type VolumeReloadReport struct { define.VolumeReload } -/* - * Docker API compatibility types - */ - -// VolumeMountReport describes the response from volume mount type VolumeMountReport struct { Err error Id string //nolint:revive,stylecheck @@ -74,8 +43,11 @@ type VolumeMountReport struct { Path string } -// VolumeUnmountReport describes the response from umounting a volume type VolumeUnmountReport struct { Err error Id string //nolint:revive,stylecheck } + +type VolumeConfigResponse struct { + define.InspectVolumeData +} diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/entities/volumes.go b/vendor/github.com/containers/podman/v5/pkg/domain/entities/volumes.go new file mode 100644 index 000000000..a0acd3a7f --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/entities/volumes.go @@ -0,0 +1,49 @@ +package entities + +import ( + "net/url" + + "github.com/containers/podman/v5/pkg/domain/entities/types" +) + +// VolumeCreateOptions provides details for creating volumes +// swagger:model +type VolumeCreateOptions = types.VolumeCreateOptions + +type VolumeConfigResponse = types.VolumeConfigResponse + +type VolumeRmOptions struct { + All bool + Force bool + Ignore bool + Timeout *uint +} + +type VolumeRmReport = types.VolumeRmReport + +type VolumeInspectReport = types.VolumeInspectReport + +// VolumePruneOptions describes the options needed +// to prune a volume from the CLI +type VolumePruneOptions struct { + Filters url.Values `json:"filters" schema:"filters"` +} + +type VolumeListOptions struct { + Filter map[string][]string +} + +type VolumeListReport = types.VolumeListReport + +// VolumeReloadReport describes the response from reload volume plugins +type VolumeReloadReport = types.VolumeReloadReport + +/* + * Docker API compatibility types + */ + +// VolumeMountReport describes the response from volume mount +type VolumeMountReport = types.VolumeMountReport + +// VolumeUnmountReport describes the response from umounting a volume +type VolumeUnmountReport = types.VolumeUnmountReport diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/filters/containers.go b/vendor/github.com/containers/podman/v5/pkg/domain/filters/containers.go similarity index 89% rename from vendor/github.com/containers/podman/v4/pkg/domain/filters/containers.go rename to vendor/github.com/containers/podman/v5/pkg/domain/filters/containers.go index c25247337..d69b46fa3 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/filters/containers.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/filters/containers.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package filters @@ -12,8 +11,9 @@ import ( "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/util" 
- "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "golang.org/x/exp/slices" ) // GenerateContainerFilterFuncs return ContainerFilter functions based of filter. @@ -98,10 +98,10 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo var imageNameWithoutTag string // Compare with ImageID, ImageName // Will match ImageName if running image has tag latest for other tags exact complete filter must be given - imageNameSlice := strings.SplitN(rootfsImageName, ":", 2) - if len(imageNameSlice) == 2 { - imageNameWithoutTag = imageNameSlice[0] - imageTag = imageNameSlice[1] + name, tag, hasColon := strings.Cut(rootfsImageName, ":") + if hasColon { + imageNameWithoutTag = name + imageTag = tag } if (rootfsImageID == filterValue) || @@ -144,13 +144,8 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo //- volume=(|) return func(c *libpod.Container) bool { containerConfig := c.ConfigNoCopy() - var dest string for _, filterValue := range filterValues { - arr := strings.SplitN(filterValue, ":", 2) - source := arr[0] - if len(arr) == 2 { - dest = arr[1] - } + source, dest, _ := strings.Cut(filterValue, ":") for _, mount := range containerConfig.Spec.Mounts { if dest != "" && (mount.Source == source && mount.Destination == dest) { return true @@ -233,19 +228,10 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo // check if networkMode is configured as `container:` // perform a match against filter `container:` // networks is already going to be empty if `container:` is configured as Mode - if strings.HasPrefix(networkMode, "container:") { - networkModeContainerPart := strings.SplitN(networkMode, ":", 2) - if len(networkModeContainerPart) < 2 { - return false - } - networkModeContainerID := networkModeContainerPart[1] + if networkModeContainerID, ok := strings.CutPrefix(networkMode, "container:"); ok { for _, val := range filterValues { - if strings.HasPrefix(val, "container:") { - filterNetworkModePart := strings.SplitN(val, ":", 2) - if len(filterNetworkModePart) < 2 { - return false - } - filterNetworkModeIDorName := filterNetworkModePart[1] + if idOrName, ok := strings.CutPrefix(val, "container:"); ok { + filterNetworkModeIDorName := idOrName filterID, err := r.LookupContainerID(filterNetworkModeIDorName) if err != nil { return false @@ -264,7 +250,7 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo return false } for _, net := range networks { - if util.StringInSlice(net, inputNetNames) { + if slices.Contains(inputNetNames, net) { return true } } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/filters/pods.go b/vendor/github.com/containers/podman/v5/pkg/domain/filters/pods.go similarity index 90% rename from vendor/github.com/containers/podman/v4/pkg/domain/filters/pods.go rename to vendor/github.com/containers/podman/v5/pkg/domain/filters/pods.go index bf41b74ac..3529f0127 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/filters/pods.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/filters/pods.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package filters @@ -11,8 +10,9 @@ import ( "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" + 
"github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "golang.org/x/exp/slices" ) // GeneratePodFilterFunc takes a filter and filtervalue (key, value) @@ -64,7 +64,7 @@ func GeneratePodFilterFunc(filter string, filterValues []string, r *libpod.Runti }, nil case "ctr-status": for _, filterValue := range filterValues { - if !util.StringInSlice(filterValue, []string{"created", "running", "paused", "stopped", "exited", "unknown"}) { + if !slices.Contains([]string{"created", "running", "paused", "stopped", "exited", "unknown"}, filterValue) { return nil, fmt.Errorf("%s is not a valid status", filterValue) } } @@ -101,7 +101,7 @@ func GeneratePodFilterFunc(filter string, filterValues []string, r *libpod.Runti }, nil case "status": for _, filterValue := range filterValues { - if !util.StringInSlice(filterValue, []string{"stopped", "running", "paused", "exited", "dead", "created", "degraded"}) { + if !slices.Contains([]string{"stopped", "running", "paused", "exited", "dead", "created", "degraded"}, filterValue) { return nil, fmt.Errorf("%s is not a valid pod status", filterValue) } } @@ -162,7 +162,7 @@ func GeneratePodFilterFunc(filter string, filterValues []string, r *libpod.Runti return false } for _, net := range networks { - if util.StringInSlice(net, inputNetNames) { + if slices.Contains(inputNetNames, net) { return true } } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/filters/volumes.go b/vendor/github.com/containers/podman/v5/pkg/domain/filters/volumes.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/domain/filters/volumes.go rename to vendor/github.com/containers/podman/v5/pkg/domain/filters/volumes.go index cff7aa979..07e2cac86 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/filters/volumes.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/filters/volumes.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package filters @@ -9,8 +8,8 @@ import ( "time" "github.com/containers/common/pkg/filters" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/pkg/util" ) func GenerateVolumeFilters(filter string, filterValues []string, runtime *libpod.Runtime) (libpod.VolumeFilter, error) { @@ -50,14 +49,7 @@ func GenerateVolumeFilters(filter string, filterValues []string, runtime *libpod case "opt": return func(v *libpod.Volume) bool { for _, val := range filterValues { - filterArray := strings.SplitN(val, "=", 2) - filterKey := filterArray[0] - var filterVal string - if len(filterArray) > 1 { - filterVal = filterArray[1] - } else { - filterVal = "" - } + filterKey, filterVal, _ := strings.Cut(val, "=") for labelKey, labelValue := range v.Options() { if labelKey == filterKey && (filterVal == "" || labelValue == filterVal) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/apply.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/apply.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/apply.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/apply.go index 51cd4e909..903154636 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/apply.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/apply.go @@ -12,8 +12,8 @@ import ( "os" "strings" - "github.com/containers/podman/v4/pkg/domain/entities" - k8sAPI 
"github.com/containers/podman/v4/pkg/k8s.io/api/core/v1" + "github.com/containers/podman/v5/pkg/domain/entities" + k8sAPI "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" "sigs.k8s.io/yaml" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/archive.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/archive.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/archive.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/archive.go index de96cf8b0..49dc9f3f0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/archive.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/archive.go @@ -4,7 +4,7 @@ import ( "context" "io" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ic *ContainerEngine) ContainerCopyFromArchive(ctx context.Context, nameOrID, containerPath string, reader io.Reader, options entities.CopyOptions) (entities.ContainerCopyFunc, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/auto-update.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/auto-update.go similarity index 68% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/auto-update.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/auto-update.go index 028ea5a07..57504e92a 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/auto-update.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/auto-update.go @@ -3,8 +3,8 @@ package abi import ( "context" - "github.com/containers/podman/v4/pkg/autoupdate" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/autoupdate" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/config.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/config.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/config.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/config.go diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers.go index 6f6d86668..9016398bb 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers.go @@ -15,24 +15,24 @@ import ( "github.com/containers/common/pkg/cgroups" "github.com/containers/common/pkg/config" "github.com/containers/image/v5/manifest" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/logs" - "github.com/containers/podman/v4/pkg/api/handlers" - "github.com/containers/podman/v4/pkg/checkpoint" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/entities/reports" - dfilters "github.com/containers/podman/v4/pkg/domain/filters" - 
"github.com/containers/podman/v4/pkg/domain/infra/abi/terminal" - "github.com/containers/podman/v4/pkg/errorhandling" - parallelctr "github.com/containers/podman/v4/pkg/parallel/ctr" - "github.com/containers/podman/v4/pkg/ps" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/signal" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/specgen/generate" - "github.com/containers/podman/v4/pkg/specgenutil" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/logs" + "github.com/containers/podman/v5/pkg/api/handlers" + "github.com/containers/podman/v5/pkg/checkpoint" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + dfilters "github.com/containers/podman/v5/pkg/domain/filters" + "github.com/containers/podman/v5/pkg/domain/infra/abi/terminal" + "github.com/containers/podman/v5/pkg/errorhandling" + parallelctr "github.com/containers/podman/v5/pkg/parallel/ctr" + "github.com/containers/podman/v5/pkg/ps" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/signal" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/specgen/generate" + "github.com/containers/podman/v5/pkg/specgenutil" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" "github.com/sirupsen/logrus" ) @@ -822,6 +822,7 @@ func makeExecConfig(options entities.ExecOptions, rt *libpod.Runtime) (*libpod.E execConfig.WorkDir = options.WorkDir execConfig.DetachKeys = &options.DetachKeys execConfig.PreserveFDs = options.PreserveFDs + execConfig.PreserveFD = options.PreserveFD execConfig.AttachStdin = options.Interactive // Make an exit command @@ -871,6 +872,7 @@ func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrID string, o if err != nil { return ec, err } + containers, err := getContainers(ic.Libpod, getContainersOptions{latest: options.Latest, names: []string{nameOrID}}) if err != nil { return ec, err @@ -1205,7 +1207,8 @@ func (ic *ContainerEngine) GetContainerExitCode(ctx context.Context, ctr *libpod exitCode, err := ctr.Wait(ctx) if err != nil { logrus.Errorf("Waiting for container %s: %v", ctr.ID(), err) - return define.ExecErrorCodeNotFound + intExitCode := int(define.ExecErrorCodeNotFound) + return intExitCode } return int(exitCode) } @@ -1698,7 +1701,8 @@ func (ic *ContainerEngine) ContainerClone(ctx context.Context, ctrCloneOpts enti } // if we do not pass term, running ctrs exit - spec.Terminal = c.Terminal() + localTerm := c.Terminal() + spec.Terminal = &localTerm // Print warnings if len(out) > 0 { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers_freebsd.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers_freebsd.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers_freebsd.go diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers_linux.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers_linux.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers_linux.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers_linux.go 
diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers_runlabel.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers_runlabel.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers_runlabel.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers_runlabel.go index 463988c87..2ee64441f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers_runlabel.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers_runlabel.go @@ -10,10 +10,10 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - envLib "github.com/containers/podman/v4/pkg/env" - "github.com/containers/podman/v4/utils" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + envLib "github.com/containers/podman/v5/pkg/env" + "github.com/containers/podman/v5/utils" "github.com/google/shlex" "github.com/sirupsen/logrus" ) @@ -135,7 +135,7 @@ func generateRunlabelCommand(runlabel string, img *libimage.Image, inputName str name = splitImageName[len(splitImageName)-1] // make sure to remove the tag from the image name, otherwise the name cannot // be used as container name because a colon is an illegal character - name = strings.SplitN(name, ":", 2)[0] + name, _, _ = strings.Cut(name, ":") } // Append the user-specified arguments to the runlabel (command). diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers_stat.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers_stat.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers_stat.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers_stat.go index 2183d39f0..99cde0035 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers_stat.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/containers_stat.go @@ -3,7 +3,7 @@ package abi import ( "context" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ic *ContainerEngine) ContainerStat(ctx context.Context, nameOrID string, containerPath string) (*entities.ContainerStatReport, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/events.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/events.go similarity index 76% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/events.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/events.go index dffe1ac1a..ffbd119f1 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/events.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/events.go @@ -3,8 +3,8 @@ package abi import ( "context" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptions) error { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/farm.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/farm.go similarity index 93% rename from 
vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/farm.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/farm.go index b4b47dd30..a055fdef4 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/farm.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/farm.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package abi @@ -10,8 +9,8 @@ import ( "github.com/containers/buildah/pkg/parse" lplatform "github.com/containers/common/libimage/platform" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/emulation" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/emulation" ) // FarmNodeName returns the local engine's name. diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/generate.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/generate.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/generate.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/generate.go index 801bab349..e424500fb 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/generate.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/generate.go @@ -7,13 +7,13 @@ import ( "fmt" "strings" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - k8sAPI "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1" - "github.com/containers/podman/v4/pkg/specgen" - generateUtils "github.com/containers/podman/v4/pkg/specgen/generate" - "github.com/containers/podman/v4/pkg/systemd/generate" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + k8sAPI "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" + "github.com/containers/podman/v5/pkg/specgen" + generateUtils "github.com/containers/podman/v5/pkg/specgen/generate" + "github.com/containers/podman/v5/pkg/systemd/generate" "sigs.k8s.io/yaml" ) @@ -207,7 +207,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, // Generate the kube pods from containers. 
if len(ctrs) >= 1 { - po, err := libpod.GenerateForKube(ctx, ctrs, options.Service, options.UseLongAnnotations, options.PodmanOnly) + po, err := libpod.GenerateForKube(ctx, ctrs, options.Service, options.PodmanOnly) if err != nil { return nil, err } @@ -283,7 +283,7 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener svcs := [][]byte{} for _, p := range pods { - po, sp, err := p.GenerateForKube(ctx, options.Service, options.UseLongAnnotations, options.PodmanOnly) + po, sp, err := p.GenerateForKube(ctx, options.Service, options.PodmanOnly) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/healthcheck.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/healthcheck.go similarity index 84% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/healthcheck.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/healthcheck.go index 8761521cc..b464c88c4 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/healthcheck.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/healthcheck.go @@ -3,8 +3,8 @@ package abi import ( "context" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ic *ContainerEngine) HealthCheckRun(ctx context.Context, nameOrID string, options entities.HealthCheckOptions) (*define.HealthCheckResults, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/images.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/images.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images.go index 6c06d68af..f7d5356ae 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/images.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images.go @@ -14,6 +14,7 @@ import ( "strconv" "strings" "syscall" + "time" bdefine "github.com/containers/buildah/define" "github.com/containers/common/libimage" @@ -27,12 +28,12 @@ import ( "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports/alltransports" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/entities/reports" - domainUtils "github.com/containers/podman/v4/pkg/domain/utils" - "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + domainUtils "github.com/containers/podman/v5/pkg/domain/utils" + "github.com/containers/podman/v5/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/storage" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -253,6 +254,15 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, options entiti pullOptions.InsecureSkipTLSVerify = options.SkipTLSVerify pullOptions.Writer = options.Writer pullOptions.OciDecryptConfig = options.OciDecryptConfig + pullOptions.MaxRetries = options.Retry + + if 
options.RetryDelay != "" { + duration, err := time.ParseDuration(options.RetryDelay) + if err != nil { + return nil, err + } + pullOptions.RetryDelay = &duration + } if !options.Quiet && pullOptions.Writer == nil { pullOptions.Writer = os.Stderr @@ -333,6 +343,14 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri pushOptions.OciEncryptLayers = options.OciEncryptLayers pushOptions.CompressionLevel = options.CompressionLevel pushOptions.ForceCompressionFormat = options.ForceCompressionFormat + pushOptions.MaxRetries = options.Retry + if options.RetryDelay != "" { + duration, err := time.ParseDuration(options.RetryDelay) + if err != nil { + return nil, err + } + pushOptions.RetryDelay = &duration + } compressionFormat := options.CompressionFormat if compressionFormat == "" { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/images_list.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images_list.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/images_list.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images_list.go index d70fca268..2dafc8218 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/images_list.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/images_list.go @@ -5,9 +5,9 @@ import ( "fmt" "github.com/containers/common/libimage" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + "golang.org/x/exp/slices" ) func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) { @@ -15,7 +15,7 @@ func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) Filters: opts.Filter, SetListData: true, } - if !opts.All && !util.StringInSlice("intermediate=true", listImagesOptions.Filters) { + if !opts.All && !slices.Contains(listImagesOptions.Filters, "intermediate=true") { // Filter intermediate images unless we want to list *all*. // NOTE: it's a positive filter, so `intermediate=false` means // to display non-intermediate images. diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/internal/expansion/LICENSE similarity index 100% rename from vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/internal/expansion/LICENSE diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/internal/expansion/README.md b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/internal/expansion/README.md new file mode 100644 index 000000000..23a7a3d14 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/internal/expansion/README.md @@ -0,0 +1,5 @@ +Copied from https://github.com/kubernetes/kubernetes/tree/master/third_party/forked/golang/expansion . + +This is to eliminate a direct dependency on `k8s.io/kubernetes`. 
+ +Ref: https://github.com/kubernetes/kubernetes/issues/79384#issuecomment-505627280 diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/expand.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/internal/expansion/expand.go similarity index 100% rename from vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/expand.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/internal/expansion/expand.go diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/manifest.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/manifest.go similarity index 63% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/manifest.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/manifest.go index 24b89872e..db638e1ed 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/manifest.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/manifest.go @@ -6,24 +6,27 @@ import ( "encoding/json" "fmt" "os" + "path" "strings" "errors" "github.com/containers/common/libimage" cp "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/docker" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/pkg/shortnames" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/pkg/domain/entities" - envLib "github.com/containers/podman/v4/pkg/env" + "github.com/containers/podman/v5/pkg/domain/entities" + envLib "github.com/containers/podman/v5/pkg/env" "github.com/containers/storage" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // ManifestCreate implements logic for creating manifest lists via ImageEngine @@ -45,6 +48,14 @@ func (ir *ImageEngine) ManifestCreate(ctx context.Context, name string, images [ } } + annotateOptions := &libimage.ManifestListAnnotateOptions{} + if len(opts.Annotations) != 0 { + annotateOptions.IndexAnnotations = opts.Annotations + if err := manifestList.AnnotateInstance("", annotateOptions); err != nil { + return "", err + } + } + addOptions := &libimage.ManifestListAddOptions{All: opts.All} for _, image := range images { if _, err := manifestList.Add(ctx, image, addOptions); err != nil { @@ -214,6 +225,13 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, name string, images []st Password: opts.Password, } + images = slices.Clone(images) + for _, image := range opts.Images { + if !slices.Contains(images, image) { + images = append(images, image) + } + } + for _, image := range images { instanceDigest, err := manifestList.Add(ctx, image, addOptions) if err != nil { @@ -226,15 +244,16 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, name string, images []st OS: opts.OS, OSVersion: opts.OSVersion, Variant: opts.Variant, + Subject: opts.IndexSubject, } if len(opts.Annotation) != 0 { annotations := make(map[string]string) for _, annotationSpec := range opts.Annotation { - spec := strings.SplitN(annotationSpec, "=", 2) - if len(spec) != 2 { - return "", fmt.Errorf("no value given for annotation %q", spec[0]) + key, val, hasVal := strings.Cut(annotationSpec, "=") + if !hasVal { + return "", fmt.Errorf("no value given for annotation %q", key) } - annotations[spec[0]] = spec[1] + annotations[key] = val } opts.Annotations = 
envLib.Join(opts.Annotations, annotations) } @@ -247,11 +266,99 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, name string, images []st return manifestList.ID(), nil } +func mergeAnnotations(preferred map[string]string, aux []string) (map[string]string, error) { + if len(aux) != 0 { + auxAnnotations := make(map[string]string) + for _, annotationSpec := range aux { + key, val, hasVal := strings.Cut(annotationSpec, "=") + if !hasVal { + return nil, fmt.Errorf("no value given for annotation %q", key) + } + auxAnnotations[key] = val + } + if preferred == nil { + preferred = make(map[string]string) + } + preferred = envLib.Join(auxAnnotations, preferred) + } + return preferred, nil +} + // ManifestAnnotate updates an entry of the manifest list func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string, opts entities.ManifestAnnotateOptions) (string, error) { - instanceDigest, err := digest.Parse(image) + manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name) if err != nil { - return "", fmt.Errorf(`invalid image digest "%s": %v`, image, err) + return "", err + } + + annotateOptions := &libimage.ManifestListAnnotateOptions{ + Architecture: opts.Arch, + Features: opts.Features, + OS: opts.OS, + OSVersion: opts.OSVersion, + Variant: opts.Variant, + Subject: opts.IndexSubject, + } + if annotateOptions.Annotations, err = mergeAnnotations(opts.Annotations, opts.Annotation); err != nil { + return "", err + } + if annotateOptions.IndexAnnotations, err = mergeAnnotations(opts.IndexAnnotations, opts.IndexAnnotation); err != nil { + return "", err + } + + var instanceDigest digest.Digest + if image == "" { + if len(opts.Annotations) != 0 { + return "", errors.New("setting annotation on an item in a manifest list requires an instance digest") + } + if len(opts.Annotation) != 0 { + return "", errors.New("setting annotation on an item in a manifest list requires an instance digest") + } + if opts.Arch != "" { + return "", errors.New("setting architecture on an item in a manifest list requires an instance digest") + } + if len(opts.Features) != 0 { + return "", errors.New("setting features on an item in a manifest list requires an instance digest") + } + if opts.OS != "" { + return "", errors.New("setting OS on an item in a manifest list requires an instance digest") + } + if len(opts.OSFeatures) != 0 { + return "", errors.New("setting OS features on an item in a manifest list requires an instance digest") + } + if opts.OSVersion != "" { + return "", errors.New("setting OS version on an item in a manifest list requires an instance digest") + } + if opts.Variant != "" { + return "", errors.New("setting variant on an item in a manifest list requires an instance digest") + } + } else { + if len(opts.IndexAnnotations) != 0 { + return "", errors.New("setting index-wide annotation in a manifest list requires no instance digest") + } + if len(opts.IndexAnnotation) != 0 { + return "", errors.New("setting index-wide annotation in a manifest list requires no instance digest") + } + if len(opts.IndexSubject) != 0 { + return "", errors.New("setting subject for a manifest list requires no instance digest") + } + instanceDigest, err = ir.digestFromDigestOrManifestListMember(ctx, manifestList, image) + if err != nil { + return "", fmt.Errorf("finding instance for %q: %w", image, err) + } + } + + if err := manifestList.AnnotateInstance(instanceDigest, annotateOptions); err != nil { + return "", err + } + + return manifestList.ID(), nil +} + +// ManifestAddArtifact creates 
artifact manifest for files and adds them to the manifest list +func (ir *ImageEngine) ManifestAddArtifact(ctx context.Context, name string, files []string, opts entities.ManifestAddArtifactOptions) (string, error) { + if len(files) < 1 { + return "", errors.New("manifest add artifact requires at least one file") } manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name) @@ -259,25 +366,42 @@ func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string, return "", err } + files = slices.Clone(files) + for _, file := range opts.Files { + if !slices.Contains(files, file) { + files = append(files, file) + } + } + + addArtifactOptions := &libimage.ManifestListAddArtifactOptions{ + Type: opts.Type, + ConfigType: opts.ConfigType, + Config: opts.Config, + LayerType: opts.LayerType, + ExcludeTitles: opts.ExcludeTitles, + Annotations: opts.Annotations, + Subject: opts.Subject, + } + + instanceDigest, err := manifestList.AddArtifact(ctx, addArtifactOptions, files...) + if err != nil { + return "", err + } + annotateOptions := &libimage.ManifestListAnnotateOptions{ Architecture: opts.Arch, Features: opts.Features, OS: opts.OS, OSVersion: opts.OSVersion, Variant: opts.Variant, + Subject: opts.IndexSubject, } - if len(opts.Annotation) != 0 { - annotations := make(map[string]string) - for _, annotationSpec := range opts.Annotation { - spec := strings.SplitN(annotationSpec, "=", 2) - if len(spec) != 2 { - return "", fmt.Errorf("no value given for annotation %q", spec[0]) - } - annotations[spec[0]] = spec[1] - } - opts.Annotations = envLib.Join(opts.Annotations, annotations) + if annotateOptions.Annotations, err = mergeAnnotations(opts.Annotations, opts.Annotation); err != nil { + return "", err + } + if annotateOptions.IndexAnnotations, err = mergeAnnotations(opts.IndexAnnotations, opts.IndexAnnotation); err != nil { + return "", err } - annotateOptions.Annotations = opts.Annotations if err := manifestList.AnnotateInstance(instanceDigest, annotateOptions); err != nil { return "", err @@ -286,6 +410,53 @@ func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string, return manifestList.ID(), nil } +func (ir *ImageEngine) digestFromDigestOrManifestListMember(ctx context.Context, list *libimage.ManifestList, name string) (digest.Digest, error) { + instanceDigest, err := digest.Parse(name) + if err == nil { + return instanceDigest, nil + } + listData, inspectErr := list.Inspect() + if inspectErr != nil { + return "", fmt.Errorf(`inspecting list "%s" for instance list: %v`, list.ID(), err) + } + // maybe the name is a file name we previously attached as part of an artifact manifest + for _, descriptor := range listData.Manifests { + if slices.Contains(descriptor.Files, path.Base(name)) || slices.Contains(descriptor.Files, name) { + return descriptor.Digest, nil + } + } + // maybe it's the name of an image we added to the list? 
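A recurring cleanup in these manifest.go hunks: the "key=value" annotation parsing that ManifestAdd and the new mergeAnnotations helper share now uses strings.Cut (added in Go 1.18) instead of strings.SplitN, since Cut reports whether the separator was present and removes the length check on a slice. A self-contained sketch of the same parsing, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"strings"
)

// parseAnnotationSpecs mirrors the shared "key=value" handling: the hasVal
// flag from strings.Cut replaces the old len(spec) != 2 check on SplitN.
func parseAnnotationSpecs(specs []string) (map[string]string, error) {
	annotations := make(map[string]string, len(specs))
	for _, spec := range specs {
		key, val, hasVal := strings.Cut(spec, "=")
		if !hasVal {
			return nil, fmt.Errorf("no value given for annotation %q", key)
		}
		annotations[key] = val
	}
	return annotations, nil
}

func main() {
	m, err := parseAnnotationSpecs([]string{"vendor=example", "stage=ci"})
	fmt.Println(m, err) // map[stage:ci vendor:example] <nil>
}
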
+ ref, err := alltransports.ParseImageName(name) + if err != nil { + withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), name) + ref, err = alltransports.ParseImageName(withDocker) + if err != nil { + image, _, err := ir.Libpod.LibimageRuntime().LookupImage(name, &libimage.LookupImageOptions{ManifestList: true}) + if err != nil { + return "", fmt.Errorf("locating image named %q to check if it's in the manifest list: %w", name, err) + } + if ref, err = image.StorageReference(); err != nil { + return "", fmt.Errorf("reading image reference %q to check if it's in the manifest list: %w", name, err) + } + } + } + // read the manifest of this image + src, err := ref.NewImageSource(ctx, ir.Libpod.SystemContext()) + if err != nil { + return "", fmt.Errorf("reading local image %q to check if it's in the manifest list: %w", name, err) + } + defer src.Close() + manifestBytes, _, err := src.GetManifest(ctx, nil) + if err != nil { + return "", fmt.Errorf("locating image named %q to check if it's in the manifest list: %w", name, err) + } + refDigest, err := manifest.Digest(manifestBytes) + if err != nil { + return "", fmt.Errorf("digesting manifest of local image %q: %w", name, err) + } + return refDigest, nil +} + // ManifestRemoveDigest removes specified digest from the specified manifest list func (ir *ImageEngine) ManifestRemoveDigest(ctx context.Context, name, image string) (string, error) { instanceDigest, err := digest.Parse(image) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/network.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/network.go similarity index 79% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/network.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/network.go index b51ba764b..3db382448 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/network.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/network.go @@ -6,11 +6,13 @@ import ( "fmt" "strconv" + "github.com/containers/common/libnetwork/pasta" + "github.com/containers/common/libnetwork/slirp4netns" "github.com/containers/common/libnetwork/types" netutil "github.com/containers/common/libnetwork/util" - "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + "golang.org/x/exp/slices" ) func (ic *ContainerEngine) NetworkUpdate(ctx context.Context, netName string, options entities.NetworkUpdateOptions) error { @@ -62,9 +64,13 @@ func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.Net return nets, err } -func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]types.Network, []error, error) { +func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, options entities.InspectOptions) ([]entities.NetworkInspectReport, []error, error) { var errs []error - networks := make([]types.Network, 0, len(namesOrIds)) + statuses, err := ic.GetContainerNetStatuses() + if err != nil { + return nil, nil, fmt.Errorf("failed to get network status for containers: %w", err) + } + networks := make([]entities.NetworkInspectReport, 0, len(namesOrIds)) for _, name := range namesOrIds { net, err := ic.Libpod.Network().NetworkInspect(name) if err != nil { @@ -75,7 +81,22 @@ func (ic *ContainerEngine) 
NetworkInspect(ctx context.Context, namesOrIds []stri return nil, nil, fmt.Errorf("inspecting network %s: %w", name, err) } } - networks = append(networks, net) + containerMap := make(map[string]entities.NetworkContainerInfo) + for _, st := range statuses { + // Make sure to only show the info for the correct network + if sb, ok := st.Status[net.Name]; ok { + containerMap[st.ID] = entities.NetworkContainerInfo{ + Name: st.Name, + Interfaces: sb.Interfaces, + } + } + } + + netReport := entities.NetworkInspectReport{ + Network: net, + Containers: containerMap, + } + networks = append(networks, netReport) } return networks, errs, nil } @@ -121,7 +142,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o if err != nil { return reports, err } - if util.StringInSlice(name, networks) { + if slices.Contains(networks, name) { // if user passes force, we nuke containers and pods if !options.Force { // Without the force option, we return an error @@ -150,8 +171,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o } func (ic *ContainerEngine) NetworkCreate(ctx context.Context, network types.Network, createOptions *types.NetworkCreateOptions) (*types.Network, error) { - // TODO (5.0): Stop accepting "pasta" as value here - if util.StringInSlice(network.Name, []string{"none", "host", "bridge", "private", "slirp4netns", "container", "ns", "default"}) { + if slices.Contains([]string{"none", "host", "bridge", "private", slirp4netns.BinaryName, pasta.BinaryName, "container", "ns", "default"}, network.Name) { return nil, fmt.Errorf("cannot create network with name %q because it conflicts with a valid network mode", network.Name) } network, err := ic.Libpod.Network().NetworkCreate(network, createOptions) @@ -242,3 +262,36 @@ func (ic *ContainerEngine) createDanglingFilterFunc(wantDangling bool) (types.Fi return wantDangling }, nil } + +type ContainerNetStatus struct { + // Name of the container + Name string + // ID of the container + ID string + // Status contains the net status, the key is the network name + Status map[string]types.StatusBlock +} + +func (ic *ContainerEngine) GetContainerNetStatuses() ([]ContainerNetStatus, error) { + cons, err := ic.Libpod.GetAllContainers() + if err != nil { + return nil, err + } + statuses := make([]ContainerNetStatus, 0, len(cons)) + for _, con := range cons { + status, err := con.GetNetworkStatus() + if err != nil { + if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) { + continue + } + return nil, err + } + + statuses = append(statuses, ContainerNetStatus{ + ID: con.ID(), + Name: con.Name(), + Status: status, + }) + } + return statuses, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/parse/parse.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/parse/parse.go similarity index 81% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/parse/parse.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/parse/parse.go index 0cb95aa04..571e52a95 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/parse/parse.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/parse/parse.go @@ -5,8 +5,8 @@ import ( "strconv" "strings" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" "github.com/docker/go-units" "github.com/sirupsen/logrus" ) @@ 
-27,64 +27,64 @@ func VolumeOptions(opts map[string]string) ([]libpod.VolumeCreateOption, error) for _, o := range splitVal { // Options will be formatted as either "opt" or // "opt=value" - splitO := strings.SplitN(o, "=", 2) - switch strings.ToLower(splitO[0]) { + opt, val, hasVal := strings.Cut(o, "=") + switch strings.ToLower(opt) { case "size": - size, err := units.FromHumanSize(splitO[1]) + size, err := units.FromHumanSize(val) if err != nil { - return nil, fmt.Errorf("cannot convert size %s to integer: %w", splitO[1], err) + return nil, fmt.Errorf("cannot convert size %s to integer: %w", val, err) } libpodOptions = append(libpodOptions, libpod.WithVolumeSize(uint64(size))) finalVal = append(finalVal, o) // set option "SIZE": "$size" - volumeOptions["SIZE"] = splitO[1] + volumeOptions["SIZE"] = val case "inodes": - inodes, err := strconv.ParseUint(splitO[1], 10, 64) + inodes, err := strconv.ParseUint(val, 10, 64) if err != nil { - return nil, fmt.Errorf("cannot convert inodes %s to integer: %w", splitO[1], err) + return nil, fmt.Errorf("cannot convert inodes %s to integer: %w", val, err) } libpodOptions = append(libpodOptions, libpod.WithVolumeInodes(inodes)) finalVal = append(finalVal, o) // set option "INODES": "$size" - volumeOptions["INODES"] = splitO[1] + volumeOptions["INODES"] = val case "uid": - if len(splitO) != 2 { + if !hasVal { return nil, fmt.Errorf("uid option must provide a UID: %w", define.ErrInvalidArg) } - intUID, err := strconv.Atoi(splitO[1]) + intUID, err := strconv.Atoi(val) if err != nil { - return nil, fmt.Errorf("cannot convert UID %s to integer: %w", splitO[1], err) + return nil, fmt.Errorf("cannot convert UID %s to integer: %w", val, err) } logrus.Debugf("Removing uid= from options and adding WithVolumeUID for UID %d", intUID) libpodOptions = append(libpodOptions, libpod.WithVolumeUID(intUID), libpod.WithVolumeNoChown()) finalVal = append(finalVal, o) // set option "UID": "$uid" - volumeOptions["UID"] = splitO[1] + volumeOptions["UID"] = val case "gid": - if len(splitO) != 2 { + if !hasVal { return nil, fmt.Errorf("gid option must provide a GID: %w", define.ErrInvalidArg) } - intGID, err := strconv.Atoi(splitO[1]) + intGID, err := strconv.Atoi(val) if err != nil { - return nil, fmt.Errorf("cannot convert GID %s to integer: %w", splitO[1], err) + return nil, fmt.Errorf("cannot convert GID %s to integer: %w", val, err) } logrus.Debugf("Removing gid= from options and adding WithVolumeGID for GID %d", intGID) libpodOptions = append(libpodOptions, libpod.WithVolumeGID(intGID), libpod.WithVolumeNoChown()) finalVal = append(finalVal, o) // set option "GID": "$gid" - volumeOptions["GID"] = splitO[1] + volumeOptions["GID"] = val case "noquota": logrus.Debugf("Removing noquota from options and adding WithVolumeDisableQuota") libpodOptions = append(libpodOptions, libpod.WithVolumeDisableQuota()) // set option "NOQUOTA": "true" volumeOptions["NOQUOTA"] = "true" case "timeout": - if len(splitO) != 2 { + if !hasVal { return nil, fmt.Errorf("timeout option must provide a valid timeout in seconds: %w", define.ErrInvalidArg) } - intTimeout, err := strconv.Atoi(splitO[1]) + intTimeout, err := strconv.Atoi(val) if err != nil { - return nil, fmt.Errorf("cannot convert Timeout %s to an integer: %w", splitO[1], err) + return nil, fmt.Errorf("cannot convert Timeout %s to an integer: %w", val, err) } if intTimeout < 0 { return nil, fmt.Errorf("volume timeout cannot be negative (got %d)", intTimeout) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/play.go 
b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/play.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/play.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/play.go index ffa174d58..cd98d5e24 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/play.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/play.go @@ -20,26 +20,28 @@ import ( "github.com/containers/common/pkg/secrets" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/cmd/podman/parse" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - v1apps "github.com/containers/podman/v4/pkg/k8s.io/api/apps/v1" - v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1" - metav1 "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/specgen/generate" - "github.com/containers/podman/v4/pkg/specgen/generate/kube" - "github.com/containers/podman/v4/pkg/specgenutil" - "github.com/containers/podman/v4/pkg/systemd/notifyproxy" - "github.com/containers/podman/v4/pkg/util" - "github.com/containers/podman/v4/utils" + "github.com/containers/podman/v5/cmd/podman/parse" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/annotations" + "github.com/containers/podman/v5/pkg/domain/entities" + entitiesTypes "github.com/containers/podman/v5/pkg/domain/entities/types" + "github.com/containers/podman/v5/pkg/domain/infra/abi/internal/expansion" + v1apps "github.com/containers/podman/v5/pkg/k8s.io/api/apps/v1" + v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" + metav1 "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/specgen/generate" + "github.com/containers/podman/v5/pkg/specgen/generate/kube" + "github.com/containers/podman/v5/pkg/specgenutil" + "github.com/containers/podman/v5/pkg/systemd/notifyproxy" + "github.com/containers/podman/v5/pkg/util" + "github.com/containers/podman/v5/utils" "github.com/coreos/go-systemd/v22/daemon" "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" yamlv3 "gopkg.in/yaml.v3" - "k8s.io/kubernetes/third_party/forked/golang/expansion" "sigs.k8s.io/yaml" ) @@ -74,7 +76,6 @@ func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name stri } ctrOpts := entities.ContainerCreateOptions{ // Inherited from infra containers - ImageVolume: define.TypeBind, IsInfra: false, MemorySwappiness: -1, ReadOnly: true, @@ -124,6 +125,51 @@ func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name stri return ctr, nil } +func prepareVolumesFrom(forContainer, podName string, ctrNames, annotations map[string]string) ([]string, error) { + annotationVolsFrom := define.VolumesFromAnnotation + "/" + forContainer + + volsFromCtrs, ok := annotations[annotationVolsFrom] + + // No volumes-from specified + if !ok || volsFromCtrs == "" { + return nil, nil + } + + // The volumes-from string is a semicolon-separated container names + // optionally with respective mount options. 
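The prepareVolumesFrom hunk continues below; as its comment says, the define.VolumesFromAnnotation value is a semicolon-separated list of "container[:mount-options]" entries. A self-contained sketch of just that splitting and validation, for reference (the pod-name prefixing the real helper performs afterwards is omitted, and the example values are illustrative):

package main

import (
	"fmt"
	"strings"
)

// entry is one parsed "container[:mount-options]" element.
type entry struct {
	Container string
	Options   string // empty when no options were given
}

func splitVolumesFrom(value string) ([]entry, error) {
	var out []entry
	for _, raw := range strings.Split(value, ";") {
		fields := strings.Split(raw, ":")
		if len(fields) != 1 && len(fields) != 2 {
			return nil, fmt.Errorf("invalid entry %q", raw)
		}
		if fields[0] == "" {
			return nil, fmt.Errorf("container name cannot be empty in %q", raw)
		}
		e := entry{Container: fields[0]}
		if len(fields) == 2 {
			e.Options = fields[1]
		}
		out = append(out, e)
	}
	return out, nil
}

func main() {
	entries, err := splitVolumesFrom("builder:ro;cache")
	fmt.Println(entries, err) // [{builder ro} {cache }] <nil>
}
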
+ volumesFrom := strings.Split(volsFromCtrs, ";") + for idx, volsFromCtr := range volumesFrom { + // Each entry is of format "container[:mount-options]" + fields := strings.Split(volsFromCtr, ":") + if len(fields) != 1 && len(fields) != 2 { + return nil, fmt.Errorf("invalid annotation %s value", annotationVolsFrom) + } + + if fields[0] == "" { + return nil, fmt.Errorf("from container name cannot be empty in annotation %s", annotationVolsFrom) + } + + // Source and target containers cannot be same + if fields[0] == forContainer { + return nil, fmt.Errorf("to and from container names cannot be same in annotation %s", annotationVolsFrom) + } + + // Update the source container name if it belongs to the pod + // the source container must exist before the target container + // in the kube yaml. Otherwise, the source container will be + // treated as an external container. This also helps in avoiding + // cyclic dependencies between containers within the pod. + if _, ok := ctrNames[fields[0]]; ok { + volumesFrom[idx] = podName + "-" + fields[0] + if len(fields) == 2 { + volumesFrom[idx] = volumesFrom[idx] + ":" + fields[1] + } + } + } + + return volumesFrom, nil +} + // Creates the name for a k8s entity based on the provided content of a // K8s yaml file and a given suffix. func k8sName(content []byte, suffix string) string { @@ -244,11 +290,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options podTemplateSpec.ObjectMeta = podYAML.ObjectMeta podTemplateSpec.Spec = podYAML.Spec - for name, val := range podYAML.Annotations { - if len(val) > define.MaxKubeAnnotation && !options.UseLongAnnotations { - return nil, fmt.Errorf("annotation %q=%q value length exceeds Kubernetes max %d", name, val, define.MaxKubeAnnotation) - } - } + for name, val := range options.Annotations { if podYAML.Annotations == nil { podYAML.Annotations = make(map[string]string) @@ -256,6 +298,10 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options podYAML.Annotations[name] = val } + if err := annotations.ValidateAnnotations(podYAML.Annotations); err != nil { + return nil, err + } + r, proxies, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps, serviceContainer) if err != nil { return nil, err @@ -475,6 +521,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY return nil, nil, fmt.Errorf("pod does not have a name") } + if _, ok := annotations[define.VolumesFromAnnotation]; ok { + return nil, nil, fmt.Errorf("annotation %s without target volume is reserved for internal use", define.VolumesFromAnnotation) + } + podOpt := entities.PodCreateOptions{ Infra: true, Net: &entities.NetOptions{NoHosts: options.NoHosts}, @@ -491,14 +541,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY } if len(options.Networks) > 0 { - var pastaNetworkNameExists bool - - _, err := ic.Libpod.Network().NetworkInspect("pasta") - if err == nil { - pastaNetworkNameExists = true - } - - ns, networks, netOpts, err := specgen.ParseNetworkFlag(options.Networks, pastaNetworkNameExists) + ns, networks, netOpts, err := specgen.ParseNetworkFlag(options.Networks) if err != nil { return nil, nil, err } @@ -509,7 +552,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY } if options.Userns == "" { - options.Userns = "host" + if v, ok := annotations[define.UserNsAnnotation]; ok { + options.Userns = v + } else { + options.Userns = 
"host" + } if podYAML.Spec.HostUsers != nil && !*podYAML.Spec.HostUsers { options.Userns = "auto" } @@ -625,7 +672,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY // error out instead reuse the current volume. vol, err = ic.Libpod.GetVolume(v.Source) if err != nil { - return nil, nil, fmt.Errorf("cannot re-use local volume for volume from configmap %q: %w", v.Source, err) + return nil, nil, fmt.Errorf("cannot reuse local volume for volume from configmap %q: %w", v.Source, err) } } else { return nil, nil, fmt.Errorf("cannot create a local volume for volume from configmap %q: %w", v.Source, err) @@ -781,6 +828,13 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY initCtrType = define.OneShotInitContainer } + var volumesFrom []string + if list, err := prepareVolumesFrom(initCtr.Name, podName, ctrNames, annotations); err != nil { + return nil, nil, err + } else if list != nil { + volumesFrom = list + } + specgenOpts := kube.CtrSpecGenOptions{ Annotations: annotations, ConfigMaps: configMaps, @@ -801,6 +855,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY SecretsManager: secretsManager, UserNSIsHost: p.Userns.IsHost(), Volumes: volumes, + VolumesFrom: volumesFrom, UtsNSIsHost: p.UtsNs.IsHost(), } specGen, err := kube.ToSpecGen(ctx, &specgenOpts) @@ -857,6 +912,13 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY labels[k] = v } + var volumesFrom []string + if list, err := prepareVolumesFrom(container.Name, podName, ctrNames, annotations); err != nil { + return nil, nil, err + } else if list != nil { + volumesFrom = list + } + specgenOpts := kube.CtrSpecGenOptions{ Annotations: annotations, ConfigMaps: configMaps, @@ -877,6 +939,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY SecretsManager: secretsManager, UserNSIsHost: p.Userns.IsHost(), Volumes: volumes, + VolumesFrom: volumesFrom, UtsNSIsHost: p.UtsNs.IsHost(), } @@ -1021,6 +1084,7 @@ func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string, } buildOpts.Isolation = isolation buildOpts.CommonBuildOpts = commonOpts + buildOpts.SystemContext = options.SystemContext buildOpts.Output = container.Image buildOpts.ContextDirectory = filepath.Dir(buildFile) buildOpts.ReportWriter = writer @@ -1142,6 +1206,8 @@ func (ic *ContainerEngine) playKubePVC(ctx context.Context, mountLabel string, p opts["o"] = v case util.VolumeImportSourceAnnotation: importFrom = v + case util.VolumeImageAnnotation: + opts["image"] = v } } volOptions = append(volOptions, libpod.WithVolumeOptions(opts)) @@ -1179,7 +1245,7 @@ func (ic *ContainerEngine) playKubePVC(ctx context.Context, mountLabel string, p } } - report.Volumes = append(report.Volumes, entities.PlayKubeVolume{ + report.Volumes = append(report.Volumes, entitiesTypes.PlayKubeVolume{ Name: vol.Name(), }) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/play_utils.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/play_utils.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/play_utils.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/play_utils.go index 482a158e6..16d30acd1 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/play_utils.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/play_utils.go @@ -1,6 +1,6 @@ package abi -import 
"github.com/containers/podman/v4/libpod/define" +import "github.com/containers/podman/v5/libpod/define" // getSdNotifyMode returns the `sdNotifyAnnotation/$name` for the specified // name. If name is empty, it'll only look for `sdNotifyAnnotation`. diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/pods.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/pods.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/pods.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/pods.go index 65c16afa2..72a4ca0f6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/pods.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/pods.go @@ -7,13 +7,13 @@ import ( "strconv" "strings" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - dfilters "github.com/containers/podman/v4/pkg/domain/filters" - "github.com/containers/podman/v4/pkg/signal" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/specgen/generate" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + dfilters "github.com/containers/podman/v5/pkg/domain/filters" + "github.com/containers/podman/v5/pkg/signal" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/specgen/generate" "github.com/sirupsen/logrus" ) @@ -121,7 +121,7 @@ func (ic *ContainerEngine) PodLogs(ctx context.Context, nameOrID string, options } // PodLogsOptions are similar but contains few extra fields like ctrName - // So cast other values as is so we can re-use the code + // So cast other values as is so we can reuse the code containerLogsOpts := entities.PodLogsOptionsToContainerLogsOptions(options) return ic.ContainerLogs(ctx, ctrNames, containerLogsOpts) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/pods_stats.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/pods_stats.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/pods_stats.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/pods_stats.go index 447ceab9d..5576f7680 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/pods_stats.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/pods_stats.go @@ -7,10 +7,10 @@ import ( "strconv" "github.com/containers/common/pkg/cgroups" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/utils" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/utils" "github.com/docker/go-units" ) @@ -44,12 +44,19 @@ func (ic *ContainerEngine) podsToStatsReport(pods []*libpod.Pod) ([]*entities.Po } podID := pods[i].ID()[:12] for j := range podStats { + var podNetInput uint64 + var podNetOutput uint64 + for _, stats := range podStats[j].Network { + podNetInput += stats.RxBytes + podNetOutput += stats.TxBytes + } + r := entities.PodStatsReport{ CPU: floatToPercentString(podStats[j].CPU), MemUsage: combineHumanValues(podStats[j].MemUsage, podStats[j].MemLimit), MemUsageBytes: 
combineBytesValues(podStats[j].MemUsage, podStats[j].MemLimit), Mem: floatToPercentString(podStats[j].MemPerc), - NetIO: combineHumanValues(podStats[j].NetInput, podStats[j].NetOutput), + NetIO: combineHumanValues(podNetInput, podNetOutput), BlockIO: combineHumanValues(podStats[j].BlockInput, podStats[j].BlockOutput), PIDS: pidsToString(podStats[j].PIDs), CID: podStats[j].ContainerID[:12], diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/runtime.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/runtime.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/runtime.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/runtime.go index f44f65701..bfa182f24 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/runtime.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/runtime.go @@ -3,7 +3,7 @@ package abi import ( "sync" - "github.com/containers/podman/v4/libpod" + "github.com/containers/podman/v5/libpod" ) // Image-related runtime linked against libpod library diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/secrets.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/secrets.go similarity index 82% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/secrets.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/secrets.go index 69d7ae5f5..02fbc9426 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/secrets.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/secrets.go @@ -9,8 +9,8 @@ import ( "strings" "github.com/containers/common/pkg/secrets" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/utils" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/utils" ) func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader io.Reader, options entities.SecretCreateOptions) (*entities.SecretCreateReport, error) { @@ -90,21 +90,7 @@ func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string if secret.UpdatedAt.IsZero() { secret.UpdatedAt = secret.CreatedAt } - report := &entities.SecretInfoReport{ - ID: secret.ID, - CreatedAt: secret.CreatedAt, - UpdatedAt: secret.UpdatedAt, - Spec: entities.SecretSpec{ - Name: secret.Name, - Driver: entities.SecretDriverSpec{ - Name: secret.Driver, - Options: secret.DriverOptions, - }, - Labels: secret.Labels, - }, - SecretData: string(data), - } - reports = append(reports, report) + reports = append(reports, secretToReportWithData(*secret, string(data))) } return reports, errs, nil @@ -126,19 +112,7 @@ func (ic *ContainerEngine) SecretList(ctx context.Context, opts entities.SecretL return nil, err } if result { - reportItem := entities.SecretInfoReport{ - ID: secret.ID, - CreatedAt: secret.CreatedAt, - UpdatedAt: secret.CreatedAt, - Spec: entities.SecretSpec{ - Name: secret.Name, - Driver: entities.SecretDriverSpec{ - Name: secret.Driver, - Options: secret.DriverOptions, - }, - }, - } - report = append(report, &reportItem) + report = append(report, secretToReport(secret)) } } return report, nil @@ -188,3 +162,24 @@ func (ic *ContainerEngine) SecretExists(ctx context.Context, nameOrID string) (* return &entities.BoolReport{Value: secret != nil}, nil } + +func secretToReport(secret secrets.Secret) *entities.SecretInfoReport { + return secretToReportWithData(secret, "") +} + 
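The secrets.go hunks replace two hand-rolled SecretInfoReport literals with the secretToReport helper above and secretToReportWithData below. Worth noting: the removed list path filled UpdatedAt from secret.CreatedAt, which looks like a copy-paste slip that the shared constructor eliminates. A reduced sketch of the pattern, with stand-in types for secrets.Secret and entities.SecretInfoReport:

package main

import (
	"fmt"
	"time"
)

type secret struct {
	ID, Name             string
	CreatedAt, UpdatedAt time.Time
}

type secretInfoReport struct {
	ID, Name             string
	CreatedAt, UpdatedAt time.Time
	SecretData           string
}

// Both the list path (no data) and the inspect path (with data) go through
// one constructor, so field mappings cannot drift between the two callers.
func toReportWithData(s secret, data string) *secretInfoReport {
	return &secretInfoReport{
		ID:         s.ID,
		Name:       s.Name,
		CreatedAt:  s.CreatedAt,
		UpdatedAt:  s.UpdatedAt,
		SecretData: data,
	}
}

func toReport(s secret) *secretInfoReport { return toReportWithData(s, "") }

func main() {
	now := time.Now()
	fmt.Println(toReport(secret{ID: "1", Name: "token", CreatedAt: now, UpdatedAt: now}).Name)
}
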
+func secretToReportWithData(secret secrets.Secret, data string) *entities.SecretInfoReport { + return &entities.SecretInfoReport{ + ID: secret.ID, + CreatedAt: secret.CreatedAt, + UpdatedAt: secret.UpdatedAt, + Spec: entities.SecretSpec{ + Name: secret.Name, + Driver: entities.SecretDriverSpec{ + Name: secret.Driver, + Options: secret.DriverOptions, + }, + Labels: secret.Labels, + }, + SecretData: data, + } +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/system.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system.go similarity index 72% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/system.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system.go index 24ee64d29..75dc5e65a 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/system.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system.go @@ -9,17 +9,12 @@ import ( "os/exec" "path/filepath" - "github.com/containers/common/pkg/cgroups" - "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/entities/reports" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/util" - "github.com/containers/podman/v4/utils" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage" "github.com/containers/storage/pkg/directory" - "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" ) @@ -35,7 +30,7 @@ func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) { // service may be run with another URI. if ic.Libpod.RemoteURI() == "" { xdg := defaultRunPath - if path, err := util.GetRuntimeDir(); err != nil { + if path, err := util.GetRootlessRuntimeDir(); err != nil { // Info is as good as we can guess... return info, err } else if path != "" { @@ -65,91 +60,6 @@ func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) { return info, err } -func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) error { - runsUnderSystemd := utils.RunsOnSystemd() - if !runsUnderSystemd { - isPid1 := os.Getpid() == 1 - if _, found := os.LookupEnv("container"); isPid1 || found { - if err := utils.MaybeMoveToSubCgroup(); err != nil { - // it is a best effort operation, so just print the - // error for debugging purposes. - logrus.Debugf("Could not move to subcgroup: %v", err) - } - } - } - - // do it only after podman has already re-execed and running with uid==0. - hasCapSysAdmin, err := unshare.HasCapSysAdmin() - if err != nil { - return err - } - // check for both euid == 0 and CAP_SYS_ADMIN because we may be running in a container with CAP_SYS_ADMIN set. 
- if os.Geteuid() == 0 && hasCapSysAdmin { - ownsCgroup, err := cgroups.UserOwnsCurrentSystemdCgroup() - if err != nil { - logrus.Infof("Failed to detect the owner for the current cgroup: %v", err) - } - if !ownsCgroup { - conf, err := ic.Config(context.Background()) - if err != nil { - return err - } - unitName := fmt.Sprintf("podman-%d.scope", os.Getpid()) - if runsUnderSystemd || conf.Engine.CgroupManager == config.SystemdCgroupsManager { - if err := utils.RunUnderSystemdScope(os.Getpid(), "user.slice", unitName); err != nil { - logrus.Debugf("Failed to add podman to systemd sandbox cgroup: %v", err) - } - } - } - return nil - } - - pausePidPath, err := util.GetRootlessPauseProcessPidPath() - if err != nil { - return fmt.Errorf("could not get pause process pid file path: %w", err) - } - - became, ret, err := rootless.TryJoinPauseProcess(pausePidPath) - if err != nil { - return err - } - if became { - os.Exit(ret) - } - if noMoveProcess { - return nil - } - - // if there is no pid file, try to join existing containers, and create a pause process. - ctrs, err := ic.Libpod.GetRunningContainers() - if err != nil { - logrus.Error(err.Error()) - os.Exit(1) - } - - paths := []string{} - for _, ctr := range ctrs { - paths = append(paths, ctr.ConfigNoCopy().ConmonPidFile) - } - - if len(paths) > 0 { - became, ret, err = rootless.TryJoinFromFilePaths(pausePidPath, true, paths) - } else { - became, ret, err = rootless.BecomeRootInUserNS(pausePidPath) - if err == nil { - utils.MovePauseProcessToScope(pausePidPath) - } - } - if err != nil { - logrus.Error(fmt.Errorf("invalid internal status, try resetting the pause process with %q: %w", os.Args[0]+" system migrate", err)) - os.Exit(1) - } - if became { - os.Exit(ret) - } - return nil -} - // SystemPrune removes unused data from the system. Pruning pods, containers, networks, volumes and images. func (ic *ContainerEngine) SystemPrune(ctx context.Context, options entities.SystemPruneOptions) (*entities.SystemPruneReport, error) { var systemPruneReport = new(entities.SystemPruneReport) @@ -401,17 +311,7 @@ func (ic *ContainerEngine) Unshare(ctx context.Context, args []string, options e } if options.RootlessNetNS { - rootlessNetNS, err := ic.Libpod.GetRootlessNetNs(true) - if err != nil { - return err - } - // Make sure to unlock, unshare can run for a long time. - rootlessNetNS.Lock.Unlock() - // We do not want to clean up the netns after unshare. - // The problem is that we cannot know if we need to clean up and - // secondly unshare should allow user to set up the namespace with - // special things, e.g. potentially macvlan or something like that. - return rootlessNetNS.Do(unshare) + return ic.Libpod.Network().RunInRootlessNetns(unshare) } return unshare() } diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system_freebsd.go new file mode 100644 index 000000000..c6ec91943 --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system_freebsd.go @@ -0,0 +1,13 @@ +package abi + +import ( + "context" +) + +// Default path for system runtime state +const defaultRunPath = "/var/run" + +// SetupRootless in a NOP for freebsd as it only configures the rootless userns on linux. 
+func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) error {
+	return nil
+}
diff --git a/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system_linux.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system_linux.go
new file mode 100644
index 000000000..abe00d89a
--- /dev/null
+++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/system_linux.go
@@ -0,0 +1,103 @@
+package abi
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/containers/common/pkg/cgroups"
+	"github.com/containers/common/pkg/config"
+	"github.com/containers/common/pkg/systemd"
+	"github.com/containers/podman/v5/pkg/rootless"
+	"github.com/containers/podman/v5/pkg/util"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/sirupsen/logrus"
+)
+
+// Default path for system runtime state
+const defaultRunPath = "/run"
+
+func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool) error {
+	runsUnderSystemd := systemd.RunsOnSystemd()
+	if !runsUnderSystemd {
+		isPid1 := os.Getpid() == 1
+		if _, found := os.LookupEnv("container"); isPid1 || found {
+			if err := cgroups.MaybeMoveToSubCgroup(); err != nil {
+				// it is a best-effort operation, so just print the
+				// error for debugging purposes.
+				logrus.Debugf("Could not move to subcgroup: %v", err)
+			}
+		}
+	}
+
+	// do it only after podman has already re-execed and is running with uid==0.
+	hasCapSysAdmin, err := unshare.HasCapSysAdmin()
+	if err != nil {
+		return err
+	}
+	// check for both euid == 0 and CAP_SYS_ADMIN because we may be running in a container with CAP_SYS_ADMIN set.
+	if os.Geteuid() == 0 && hasCapSysAdmin {
+		ownsCgroup, err := cgroups.UserOwnsCurrentSystemdCgroup()
+		if err != nil {
+			logrus.Infof("Failed to detect the owner for the current cgroup: %v", err)
+		}
+		if !ownsCgroup {
+			conf, err := ic.Config(context.Background())
+			if err != nil {
+				return err
+			}
+			unitName := fmt.Sprintf("podman-%d.scope", os.Getpid())
+			if runsUnderSystemd || conf.Engine.CgroupManager == config.SystemdCgroupsManager {
+				if err := systemd.RunUnderSystemdScope(os.Getpid(), "user.slice", unitName); err != nil {
+					logrus.Debugf("Failed to add podman to systemd sandbox cgroup: %v", err)
+				}
+			}
+		}
+		return nil
+	}
+
+	pausePidPath, err := util.GetRootlessPauseProcessPidPath()
+	if err != nil {
+		return fmt.Errorf("could not get pause process pid file path: %w", err)
+	}
+
+	became, ret, err := rootless.TryJoinPauseProcess(pausePidPath)
+	if err != nil {
+		return err
+	}
+	if became {
+		os.Exit(ret)
+	}
+	if noMoveProcess {
+		return nil
+	}
+
+	// if there is no pid file, try to join existing containers, and create a pause process.
+ ctrs, err := ic.Libpod.GetRunningContainers() + if err != nil { + logrus.Error(err.Error()) + os.Exit(1) + } + + paths := []string{} + for _, ctr := range ctrs { + paths = append(paths, ctr.ConfigNoCopy().ConmonPidFile) + } + + if len(paths) > 0 { + became, ret, err = rootless.TryJoinFromFilePaths(pausePidPath, true, paths) + } else { + became, ret, err = rootless.BecomeRootInUserNS(pausePidPath) + if err == nil { + systemd.MovePauseProcessToScope(pausePidPath) + } + } + if err != nil { + logrus.Error(fmt.Errorf("invalid internal status, try resetting the pause process with %q: %w", os.Args[0]+" system migrate", err)) + os.Exit(1) + } + if became { + os.Exit(ret) + } + return nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/sigproxy_commn.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/sigproxy_commn.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/sigproxy_commn.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/sigproxy_commn.go index d42685508..e4a601f93 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/sigproxy_commn.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/sigproxy_commn.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package terminal @@ -8,10 +7,10 @@ import ( "os" "syscall" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/shutdown" - "github.com/containers/podman/v4/pkg/signal" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/shutdown" + "github.com/containers/podman/v5/pkg/signal" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/terminal.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/terminal.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/terminal.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/terminal.go index 37dadd92a..c7b75c6c2 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/terminal.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/terminal.go @@ -7,7 +7,7 @@ import ( "os/signal" "github.com/containers/common/pkg/resize" - lsignal "github.com/containers/podman/v4/pkg/signal" + lsignal "github.com/containers/podman/v5/pkg/signal" "github.com/moby/term" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/terminal_common.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/terminal_common.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/terminal_common.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/terminal_common.go index f9a012b7d..e4f3afeb3 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/terminal_common.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/terminal_common.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package terminal @@ -10,8 +9,8 @@ import ( "os" "github.com/containers/common/pkg/resize" - "github.com/containers/podman/v4/libpod" - 
"github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" "github.com/sirupsen/logrus" "golang.org/x/term" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/terminal_unsupported.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/terminal_unsupported.go similarity index 87% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/terminal_unsupported.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/terminal_unsupported.go index 21ed6c8d4..a1b5ac37d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/terminal/terminal_unsupported.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/terminal/terminal_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package terminal @@ -8,8 +7,8 @@ import ( "errors" "os" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" ) // ExecAttachCtr execs and attaches to a container diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/trust.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/trust.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/trust.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/trust.go index 9b30920d7..e23ed4bd5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/trust.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/trust.go @@ -5,8 +5,8 @@ import ( "fmt" "os" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/trust" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/trust" ) func (ir *ImageEngine) ShowTrust(ctx context.Context, args []string, options entities.ShowTrustOptions) (*entities.ShowTrustReport, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/volumes.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/volumes.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/volumes.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/volumes.go index 1423ac571..d99ab0f99 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/volumes.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/abi/volumes.go @@ -5,12 +5,12 @@ import ( "errors" "fmt" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/entities/reports" - "github.com/containers/podman/v4/pkg/domain/filters" - "github.com/containers/podman/v4/pkg/domain/infra/abi/parse" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/domain/filters" + "github.com/containers/podman/v5/pkg/domain/infra/abi/parse" ) func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IDOrNameResponse, error) { diff --git 
a/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_abi.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_abi.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_abi.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_abi.go index 1f9c81706..21704fa76 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_abi.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_abi.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package infra @@ -7,9 +6,9 @@ import ( "context" "fmt" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/infra/tunnel" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/infra/tunnel" ) // NewContainerEngine factory provides a libpod runtime for container-related operations diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_libpod.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_libpod.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_libpod.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_libpod.go index fa23bd4d2..6c4d4494d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_libpod.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_libpod.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package infra @@ -14,11 +13,11 @@ import ( "syscall" "github.com/containers/common/pkg/cgroups" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/namespaces" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/namespaces" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/types" "github.com/sirupsen/logrus" @@ -34,7 +33,6 @@ var ( ) type engineOpts struct { - noStore bool withFDS bool reset bool renumber bool @@ -45,7 +43,6 @@ type engineOpts struct { func GetRuntime(ctx context.Context, flags *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) { runtimeSync.Do(func() { runtimeLib, runtimeErr = getRuntime(ctx, flags, &engineOpts{ - noStore: false, withFDS: true, reset: cfg.IsReset, renumber: cfg.IsRenumber, @@ -55,16 +52,6 @@ func GetRuntime(ctx context.Context, flags *flag.FlagSet, cfg *entities.PodmanCo return runtimeLib, runtimeErr } -// GetRuntimeNoStore generates a new libpod runtime configured by command line options -func GetRuntimeNoStore(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) { - return getRuntime(ctx, fs, &engineOpts{ - noStore: true, - withFDS: true, - reset: false, - config: cfg, - }) -} - func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpod.Runtime, error) { options := []libpod.RuntimeOption{} storageOpts := types.StoreOptions{} @@ -148,9 +135,6 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo options = append(options, 
libpod.WithStorageConfig(storageOpts)) } - if !storageSet && opts.noStore { - options = append(options, libpod.WithNoStore()) - } // TODO CLI flags for image config? // TODO CLI flag for signature policy? @@ -243,7 +227,7 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin options.HostUIDMapping = false options.HostGIDMapping = false options.AutoUserNs = true - opts, err := mode.GetAutoOptions() + opts, err := util.GetAutoOptions(mode) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_proxy.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_proxy.go similarity index 83% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_proxy.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_proxy.go index e5761d4ab..f8c2daaca 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_proxy.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_proxy.go @@ -1,13 +1,12 @@ //go:build !remote -// +build !remote package infra import ( "context" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/infra/abi" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/infra/abi" flag "github.com/spf13/pflag" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_tunnel.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_tunnel.go similarity index 90% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_tunnel.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_tunnel.go index c3eb660eb..a28385890 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_tunnel.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/runtime_tunnel.go @@ -1,5 +1,4 @@ //go:build remote -// +build remote package infra @@ -8,9 +7,9 @@ import ( "fmt" "sync" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/infra/tunnel" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/infra/tunnel" ) var ( diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/auto-update.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/auto-update.go similarity index 81% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/auto-update.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/auto-update.go index 469da5a7a..e8322bf6a 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/auto-update.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/auto-update.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ic *ContainerEngine) AutoUpdate(ctx context.Context, options entities.AutoUpdateOptions) ([]*entities.AutoUpdateReport, []error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/containers.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/containers.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/containers.go rename to 
vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/containers.go index aec85bc5a..9c81fb219 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/containers.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/containers.go @@ -15,17 +15,17 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/image/v5/docker/reference" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/api/handlers" - "github.com/containers/podman/v4/pkg/bindings" - "github.com/containers/podman/v4/pkg/bindings/containers" - "github.com/containers/podman/v4/pkg/bindings/images" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/entities/reports" - "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/api/handlers" + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings/containers" + "github.com/containers/podman/v5/pkg/bindings/images" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/util" "github.com/containers/storage/types" "github.com/sirupsen/logrus" ) @@ -788,7 +788,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri logrus.Errorf("Cannot get exit code: %v", err) report.ExitCode = define.ExecErrorCodeNotFound } else { - report.ExitCode = event.ContainerExitCode + report.ExitCode = *event.ContainerExitCode } } else { report.ExitCode = int(exitCode) @@ -977,7 +977,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta return &report, nil //nolint: nilerr } - report.ExitCode = lastEvent.ContainerExitCode + report.ExitCode = *lastEvent.ContainerExitCode return &report, err } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/events.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/events.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/events.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/events.go index 30be92e23..d2c0e62ef 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/events.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/events.go @@ -5,9 +5,9 @@ import ( "fmt" "strings" - "github.com/containers/podman/v4/libpod/events" - "github.com/containers/podman/v4/pkg/bindings/system" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/libpod/events" + "github.com/containers/podman/v5/pkg/bindings/system" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ic *ContainerEngine) Events(ctx context.Context, opts entities.EventsOptions) error { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/farm.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/farm.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/farm.go rename to 
vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/farm.go index 1dbde3178..dfb2183f8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/farm.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/farm.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/containers/podman/v4/pkg/bindings/system" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/bindings/system" + "github.com/containers/podman/v5/pkg/domain/entities" ) const ( diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/healthcheck.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/healthcheck.go similarity index 61% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/healthcheck.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/healthcheck.go index 67d529201..2be64f701 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/healthcheck.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/healthcheck.go @@ -3,9 +3,9 @@ package tunnel import ( "context" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings/containers" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/bindings/containers" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ic *ContainerEngine) HealthCheckRun(ctx context.Context, nameOrID string, options entities.HealthCheckOptions) (*define.HealthCheckResults, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/helpers.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/helpers.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/helpers.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/helpers.go index d3741cadc..3d33f179d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/helpers.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/helpers.go @@ -5,11 +5,11 @@ import ( "errors" "fmt" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings/containers" - "github.com/containers/podman/v4/pkg/bindings/pods" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/bindings/containers" + "github.com/containers/podman/v5/pkg/bindings/pods" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/errorhandling" ) // FIXME: the `ignore` parameter is very likely wrong here as it should rather diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/images.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/images.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/images.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/images.go index 304c99ccd..2b8daeba7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/images.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/images.go @@ -15,12 +15,12 @@ import ( "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/docker/reference" 
"github.com/containers/image/v5/types" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings/images" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/entities/reports" - "github.com/containers/podman/v4/pkg/domain/utils" - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/bindings/images" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/domain/utils" + "github.com/containers/podman/v5/pkg/errorhandling" "github.com/containers/storage/pkg/archive" ) @@ -37,8 +37,8 @@ func (ir *ImageEngine) Remove(ctx context.Context, imagesArg []string, opts enti func (ir *ImageEngine) List(ctx context.Context, opts entities.ImageListOptions) ([]*entities.ImageSummary, error) { filters := make(map[string][]string, len(opts.Filter)) for _, filter := range opts.Filter { - f := strings.Split(filter, "=") - filters[f[0]] = f[1:] + f := strings.SplitN(filter, "=", 2) + filters[f[0]] = append(filters[f[0]], f[1]) } options := new(images.ListOptions).WithAll(opts.All).WithFilters(filters) psImages, err := images.List(ir.ClientCtx, options) @@ -123,6 +123,12 @@ func (ir *ImageEngine) Pull(ctx context.Context, rawImage string, opts entities. options.WithSkipTLSVerify(false) } } + if opts.Retry != nil { + options.WithRetry(*opts.Retry) + } + if opts.RetryDelay != "" { + options.WithRetryDelay(opts.RetryDelay) + } pulledImages, err := images.Pull(ir.ClientCtx, rawImage, options) if err != nil { return nil, err @@ -267,6 +273,12 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri options.WithSkipTLSVerify(false) } } + if opts.Retry != nil { + options.WithRetry(*opts.Retry) + } + if opts.RetryDelay != "" { + options.WithRetryDelay(opts.RetryDelay) + } if err := images.Push(ir.ClientCtx, source, destination, options); err != nil { return nil, err } @@ -336,7 +348,7 @@ func (ir *ImageEngine) Save(ctx context.Context, nameOrID string, tags []string, return err } - return archive.Untar(f, opts.Output, nil) + return archive.Untar(f, opts.Output, &archive.TarOptions{NoLchown: true}) } func (ir *ImageEngine) Search(ctx context.Context, term string, opts entities.ImageSearchOptions) ([]entities.ImageSearchReport, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/kube.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/kube.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/kube.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/kube.go index d03d4ebc7..6a863cff7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/kube.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/kube.go @@ -6,10 +6,10 @@ import ( "io" "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/pkg/bindings/generate" - "github.com/containers/podman/v4/pkg/bindings/kube" - "github.com/containers/podman/v4/pkg/bindings/play" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/bindings/generate" + "github.com/containers/podman/v5/pkg/bindings/kube" + "github.com/containers/podman/v5/pkg/bindings/play" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ic *ContainerEngine) GenerateSystemd(ctx 
context.Context, nameOrID string, opts entities.GenerateSystemdOptions) (*entities.GenerateSystemdReport, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/manifest.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/manifest.go similarity index 73% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/manifest.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/manifest.go index 5b176e31e..e04236429 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/manifest.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/manifest.go @@ -7,15 +7,16 @@ import ( "strings" "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/pkg/bindings/images" - "github.com/containers/podman/v4/pkg/bindings/manifests" - "github.com/containers/podman/v4/pkg/domain/entities" - envLib "github.com/containers/podman/v4/pkg/env" + "github.com/containers/podman/v5/pkg/bindings/images" + "github.com/containers/podman/v5/pkg/bindings/manifests" + "github.com/containers/podman/v5/pkg/domain/entities" + envLib "github.com/containers/podman/v5/pkg/env" + "golang.org/x/exp/slices" ) // ManifestCreate implements manifest create via ImageEngine func (ir *ImageEngine) ManifestCreate(ctx context.Context, name string, images []string, opts entities.ManifestCreateOptions) (string, error) { - options := new(manifests.CreateOptions).WithAll(opts.All).WithAmend(opts.Amend) + options := new(manifests.CreateOptions).WithAll(opts.All).WithAmend(opts.Amend).WithAnnotation(opts.Annotations) imageID, err := manifests.Create(ir.ClientCtx, name, images, options) if err != nil { return imageID, fmt.Errorf("creating manifest: %w", err) @@ -57,6 +58,13 @@ func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string, opts en // ManifestAdd adds images to the manifest list func (ir *ImageEngine) ManifestAdd(_ context.Context, name string, imageNames []string, opts entities.ManifestAddOptions) (string, error) { + imageNames = slices.Clone(imageNames) + for _, image := range opts.Images { + if !slices.Contains(imageNames, image) { + imageNames = append(imageNames, image) + } + } + options := new(manifests.AddOptions).WithAll(opts.All).WithArch(opts.Arch).WithVariant(opts.Variant) options.WithFeatures(opts.Features).WithImages(imageNames).WithOS(opts.OS).WithOSVersion(opts.OSVersion) options.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile) @@ -64,11 +72,11 @@ func (ir *ImageEngine) ManifestAdd(_ context.Context, name string, imageNames [] if len(opts.Annotation) != 0 { annotations := make(map[string]string) for _, annotationSpec := range opts.Annotation { - spec := strings.SplitN(annotationSpec, "=", 2) - if len(spec) != 2 { - return "", fmt.Errorf("no value given for annotation %q", spec[0]) + key, val, hasVal := strings.Cut(annotationSpec, "=") + if !hasVal { + return "", fmt.Errorf("no value given for annotation %q", key) } - annotations[spec[0]] = spec[1] + annotations[key] = val } opts.Annotations = envLib.Join(opts.Annotations, annotations) } @@ -89,6 +97,39 @@ func (ir *ImageEngine) ManifestAdd(_ context.Context, name string, imageNames [] return id, nil } +// ManifestAddArtifact creates artifact manifests and adds them to the manifest list +func (ir *ImageEngine) ManifestAddArtifact(_ context.Context, name string, files []string, opts entities.ManifestAddArtifactOptions) (string, error) { + files = slices.Clone(files) + for _, file := range 
opts.Files { + if !slices.Contains(files, file) { + files = append(files, file) + } + } + options := new(manifests.AddArtifactOptions).WithArch(opts.Arch).WithVariant(opts.Variant) + options.WithFeatures(opts.Features).WithOS(opts.OS).WithOSVersion(opts.OSVersion).WithOSFeatures(opts.OSFeatures) + if len(opts.Annotation) != 0 { + annotations := make(map[string]string) + for _, annotationSpec := range opts.Annotation { + key, val, hasVal := strings.Cut(annotationSpec, "=") + if !hasVal { + return "", fmt.Errorf("no value given for annotation %q", key) + } + annotations[key] = val + } + options.WithAnnotation(annotations) + } + options.WithType(opts.Type).WithConfigType(opts.ConfigType).WithLayerType(opts.LayerType) + options.WithConfig(opts.Config) + options.WithExcludeTitles(opts.ExcludeTitles).WithSubject(opts.Subject) + options.WithAnnotations(opts.Annotations) + options.WithFiles(files) + id, err := manifests.AddArtifact(ir.ClientCtx, name, options) + if err != nil { + return id, fmt.Errorf("adding to manifest list %s: %w", name, err) + } + return id, nil +} + // ManifestAnnotate updates an entry of the manifest list func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, images string, opts entities.ManifestAnnotateOptions) (string, error) { options := new(manifests.ModifyOptions).WithArch(opts.Arch).WithVariant(opts.Variant) @@ -97,11 +138,11 @@ func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, images string if len(opts.Annotation) != 0 { annotations := make(map[string]string) for _, annotationSpec := range opts.Annotation { - spec := strings.SplitN(annotationSpec, "=", 2) - if len(spec) != 2 { - return "", fmt.Errorf("no value given for annotation %q", spec[0]) + key, val, hasVal := strings.Cut(annotationSpec, "=") + if !hasVal { + return "", fmt.Errorf("no value given for annotation %q", key) } - annotations[spec[0]] = spec[1] + annotations[key] = val } opts.Annotations = envLib.Join(opts.Annotations, annotations) } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/network.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/network.go similarity index 90% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/network.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/network.go index 36d8ddce4..bd48f6dca 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/network.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/network.go @@ -6,10 +6,10 @@ import ( "fmt" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings/network" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/bindings/network" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/errorhandling" ) func (ic *ContainerEngine) NetworkUpdate(ctx context.Context, netName string, opts entities.NetworkUpdateOptions) error { @@ -22,9 +22,9 @@ func (ic *ContainerEngine) NetworkList(ctx context.Context, opts entities.Networ return network.List(ic.ClientCtx, options) } -func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, opts entities.InspectOptions) ([]types.Network, []error, error) { +func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []string, 
opts entities.InspectOptions) ([]entities.NetworkInspectReport, []error, error) { var ( - reports = make([]types.Network, 0, len(namesOrIds)) + reports = make([]entities.NetworkInspectReport, 0, len(namesOrIds)) errs = []error{} ) options := new(network.InspectOptions) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/pods.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/pods.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/pods.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/pods.go index f9314dcfe..f485b8a38 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/pods.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/pods.go @@ -5,11 +5,11 @@ import ( "errors" "fmt" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings/pods" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/bindings/pods" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/util" ) func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrID string) (*entities.BoolReport, error) { @@ -46,7 +46,7 @@ func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, opt func (ic *ContainerEngine) PodLogs(ctx context.Context, nameOrIDs string, options entities.PodLogsOptions) error { // PodLogsOptions are similar but contains few extra fields like ctrName - // So cast other values as is so we can re-use the code + // So cast other values as is so we can reuse the code containerLogsOpts := entities.PodLogsOptionsToContainerLogsOptions(options) // interface only accepts slice, keep everything consistent diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/runtime.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/runtime.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/runtime.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/runtime.go index 65c1354df..05f8b0078 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/runtime.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/runtime.go @@ -6,8 +6,8 @@ import ( "sync" "syscall" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/signal" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/signal" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/secrets.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/secrets.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/secrets.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/secrets.go index 4776f6532..326a75c8b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/secrets.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/secrets.go @@ -5,9 +5,9 @@ import ( "fmt" "io" - "github.com/containers/podman/v4/pkg/bindings/secrets" - "github.com/containers/podman/v4/pkg/domain/entities" - 
"github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/bindings/secrets" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/errorhandling" ) func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader io.Reader, options entities.SecretCreateOptions) (*entities.SecretCreateReport, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/system.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/system.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/system.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/system.go index fc82e7b2b..492fd0a89 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/system.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/system.go @@ -4,9 +4,9 @@ import ( "context" "errors" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/bindings/system" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/bindings/system" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/trust.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/trust.go similarity index 87% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/trust.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/trust.go index deb9965a3..07097d92a 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/trust.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/trust.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities" ) func (ir *ImageEngine) ShowTrust(ctx context.Context, args []string, options entities.ShowTrustOptions) (*entities.ShowTrustReport, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/volumes.go b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/volumes.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/volumes.go rename to vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/volumes.go index b70d29783..175d5b146 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/volumes.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/infra/tunnel/volumes.go @@ -5,10 +5,10 @@ import ( "errors" "fmt" - "github.com/containers/podman/v4/pkg/bindings/volumes" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/entities/reports" - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/bindings/volumes" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/entities/reports" + "github.com/containers/podman/v5/pkg/errorhandling" ) func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IDOrNameResponse, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/utils/scp.go b/vendor/github.com/containers/podman/v5/pkg/domain/utils/scp.go similarity 
index 87% rename from vendor/github.com/containers/podman/v4/pkg/domain/utils/scp.go rename to vendor/github.com/containers/podman/v5/pkg/domain/utils/scp.go index 0bd1b0d3e..93982be9c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/utils/scp.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/utils/scp.go @@ -12,8 +12,8 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/transports/alltransports" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" "github.com/sirupsen/logrus" ) @@ -33,11 +33,6 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool, sshMode return nil, nil, nil, nil, err } - confR, err := config.New(nil) // create a hand made config for the remote engine since we might use remote and native at once - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("could not make config: %w", err) - } - locations := []*entities.ImageScpOptions{} cliConnections := []string{} args := []string{src} @@ -84,17 +79,15 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool, sshMode cliConnections = []string{} } - cfg, err := config.ReadCustomConfig() // get ready to set ssh destination if necessary + cfg, err := config.Default() if err != nil { return nil, nil, nil, nil, err } - var serv map[string]config.Destination - serv, err = GetServiceInformation(&sshInfo, cliConnections, cfg) + err = GetServiceInformation(&sshInfo, cliConnections, cfg) if err != nil { return nil, nil, nil, nil, err } - confR.Engine = config.EngineConfig{Remote: true, CgroupManager: "cgroupfs", ServiceDestinations: serv} // pass the service dest (either remote or something else) to engine saveCmd, loadCmd := CreateCommands(source, dest, parentFlags, podman) switch { @@ -212,22 +205,23 @@ func LoginUser(user string) (*exec.Cmd, error) { // and copies the saved image dir over to the remote host and then loads it onto the machine // returns a string containing output or an error func LoadToRemote(dest entities.ImageScpOptions, localFile string, tag string, url *url.URL, iden string, sshEngine ssh.EngineMode) (string, string, error) { - port, err := strconv.Atoi(url.Port()) - if err != nil { - return "", "", err + port := 0 + urlPort := url.Port() + if urlPort != "" { + var err error + port, err = strconv.Atoi(url.Port()) + if err != nil { + return "", "", err + } } - remoteFile, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: url.String(), Identity: iden, Port: port, User: url.User, Args: []string{"mktemp"}}, sshEngine) + input, err := os.Open(localFile) if err != nil { return "", "", err } + defer input.Close() - opts := ssh.ConnectionScpOptions{User: url.User, Identity: iden, Port: port, Source: localFile, Destination: "ssh://" + url.User.String() + "@" + url.Hostname() + ":" + remoteFile} - scpRep, err := ssh.Scp(&opts, sshEngine) - if err != nil { - return "", "", err - } - out, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: url.String(), Identity: iden, Port: port, User: url.User, Args: []string{"podman", "image", "load", "--input=" + scpRep + ";", "rm", scpRep}}, sshEngine) + out, err := ssh.ExecWithInput(&ssh.ConnectionExecOptions{Host: url.String(), Identity: iden, Port: port, User: url.User, Args: []string{"podman", "image", "load"}}, sshEngine, input) if err != nil { return "", "", err } @@ -238,7 +232,7 @@ func 
LoadToRemote(dest entities.ImageScpOptions, localFile string, tag string, u outArr := strings.Split(rep, " ") id := outArr[len(outArr)-1] if len(dest.Tag) > 0 { // tag the remote image using the output ID - _, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: url.Hostname(), Identity: iden, Port: port, User: url.User, Args: []string{"podman", "image", "tag", id, dest.Tag}}, sshEngine) + _, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: url.String(), Identity: iden, Port: port, User: url.User, Args: []string{"podman", "image", "tag", id, dest.Tag}}, sshEngine) if err != nil { return "", "", err } @@ -257,9 +251,14 @@ func SaveToRemote(image, localFile string, tag string, uri *url.URL, iden string return fmt.Errorf("renaming of an image is currently not supported: %w", define.ErrInvalidArg) } - port, err := strconv.Atoi(uri.Port()) - if err != nil { - return err + port := 0 + urlPort := uri.Port() + if urlPort != "" { + var err error + port, err = strconv.Atoi(uri.Port()) + if err != nil { + return err + } } remoteFile, err := ssh.Exec(&ssh.ConnectionExecOptions{Host: uri.String(), Identity: iden, Port: port, User: uri.User, Args: []string{"mktemp"}}, sshEngine) @@ -401,15 +400,15 @@ func RemoteArgLength(input string, side int) int { } // GetServiceInformation takes the parsed list of hosts to connect to and validates the information -func GetServiceInformation(sshInfo *entities.ImageScpConnections, cliConnections []string, cfg *config.Config) (map[string]config.Destination, error) { - var serv map[string]config.Destination +func GetServiceInformation(sshInfo *entities.ImageScpConnections, cliConnections []string, cfg *config.Config) error { var urlS string var iden string for i, val := range cliConnections { - splitEnv := strings.SplitN(val, "::", 2) - sshInfo.Connections = append(sshInfo.Connections, splitEnv[0]) - conn, found := cfg.Engine.ServiceDestinations[sshInfo.Connections[i]] - if found { + connection, _, _ := strings.Cut(val, "::") + sshInfo.Connections = append(sshInfo.Connections, connection) + conn, err := cfg.GetConnection(sshInfo.Connections[i], false) + if err == nil { + // connection found urlS = conn.URI iden = conn.Identity } else { // no match, warn user and do a manual connection. 
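The hunks above and below replace strings.SplitN with strings.Cut when splitting `host::identity` connection specs, the same idiom this update applies to env parsing, image list filters, and manifest annotations. A minimal standalone sketch of the difference follows (illustrative only; the values and variable names are not taken from this patch):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// strings.SplitN always returns at least one element, so callers must
    	// guard with len(parts) before touching parts[1].
    	parts := strings.SplitN("remote::~/.ssh/id_ed25519", "::", 2)
    	if len(parts) == 2 {
    		fmt.Println("SplitN:", parts[0], parts[1])
    	}

    	// strings.Cut reports the separator's presence explicitly, removing the
    	// length check and any chance of an out-of-range index.
    	host, identity, ok := strings.Cut("remote::~/.ssh/id_ed25519", "::")
    	fmt.Println("Cut:", host, identity, ok) // Cut: remote ~/.ssh/id_ed25519 true

    	// With no separator present, Cut returns the whole input and ok == false,
    	// which is how a bare connection name falls through to the config lookup.
    	name, _, found := strings.Cut("remote", "::")
    	fmt.Println("Cut:", name, found) // Cut: remote false
    }

Cut also reads better at call sites: the destructured names (host, identity) document the expected format inline, where SplitN leaves anonymous slice indexes.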
@@ -419,17 +418,17 @@ func GetServiceInformation(sshInfo *entities.ImageScpConnections, cliConnections } urlFinal, err := url.Parse(urlS) // create an actual url to pass to exec command if err != nil { - return nil, err + return err } if urlFinal.User.Username() == "" { if urlFinal.User, err = GetUserInfo(urlFinal); err != nil { - return nil, err + return err } } sshInfo.URI = append(sshInfo.URI, urlFinal) sshInfo.Identities = append(sshInfo.Identities, iden) } - return serv, nil + return nil } func GetUserInfo(uri *url.URL) (*url.Userinfo, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/utils/secrets_filters.go b/vendor/github.com/containers/podman/v5/pkg/domain/utils/secrets_filters.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/domain/utils/secrets_filters.go rename to vendor/github.com/containers/podman/v5/pkg/domain/utils/secrets_filters.go index ab9b681ec..48162ce08 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/utils/secrets_filters.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/utils/secrets_filters.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/containers/common/pkg/secrets" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/pkg/util" ) func IfPassesSecretsFilter(s secrets.Secret, filters map[string][]string) (bool, error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/utils/utils.go b/vendor/github.com/containers/podman/v5/pkg/domain/utils/utils.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/domain/utils/utils.go rename to vendor/github.com/containers/podman/v5/pkg/domain/utils/utils.go index ee213e1b6..ca7a3210f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/utils/utils.go +++ b/vendor/github.com/containers/podman/v5/pkg/domain/utils/utils.go @@ -34,8 +34,8 @@ func ToLibpodFilters(f url.Values) (filters []string) { func ToURLValues(f []string) (filters url.Values) { filters = make(url.Values) for _, v := range f { - t := strings.SplitN(v, "=", 2) - filters.Add(t[0], t[1]) + key, val, _ := strings.Cut(v, "=") + filters.Add(key, val) } return } diff --git a/vendor/github.com/containers/podman/v4/pkg/emulation/binfmtmisc_linux.go b/vendor/github.com/containers/podman/v5/pkg/emulation/binfmtmisc_linux.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/emulation/binfmtmisc_linux.go rename to vendor/github.com/containers/podman/v5/pkg/emulation/binfmtmisc_linux.go index 8159f20fd..4ab9c4d00 100644 --- a/vendor/github.com/containers/podman/v4/pkg/emulation/binfmtmisc_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/emulation/binfmtmisc_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package emulation diff --git a/vendor/github.com/containers/podman/v4/pkg/emulation/binfmtmisc_other.go b/vendor/github.com/containers/podman/v5/pkg/emulation/binfmtmisc_other.go similarity index 82% rename from vendor/github.com/containers/podman/v4/pkg/emulation/binfmtmisc_other.go rename to vendor/github.com/containers/podman/v5/pkg/emulation/binfmtmisc_other.go index 9e7c6a48f..cc8e686ed 100644 --- a/vendor/github.com/containers/podman/v4/pkg/emulation/binfmtmisc_other.go +++ b/vendor/github.com/containers/podman/v5/pkg/emulation/binfmtmisc_other.go @@ -1,5 +1,4 @@ //go:build !linux && !remote -// +build !linux,!remote package emulation diff --git a/vendor/github.com/containers/podman/v4/pkg/emulation/elf.go b/vendor/github.com/containers/podman/v5/pkg/emulation/elf.go 
similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/emulation/elf.go rename to vendor/github.com/containers/podman/v5/pkg/emulation/elf.go index 93f8384ed..b686769ec 100644 --- a/vendor/github.com/containers/podman/v4/pkg/emulation/elf.go +++ b/vendor/github.com/containers/podman/v5/pkg/emulation/elf.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package emulation diff --git a/vendor/github.com/containers/podman/v4/pkg/emulation/emulation.go b/vendor/github.com/containers/podman/v5/pkg/emulation/emulation.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/emulation/emulation.go rename to vendor/github.com/containers/podman/v5/pkg/emulation/emulation.go index 52f9c8f48..49b32c1f5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/emulation/emulation.go +++ b/vendor/github.com/containers/podman/v5/pkg/emulation/emulation.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package emulation diff --git a/vendor/github.com/containers/podman/v4/pkg/env/env.go b/vendor/github.com/containers/podman/v5/pkg/env/env.go similarity index 82% rename from vendor/github.com/containers/podman/v4/pkg/env/env.go rename to vendor/github.com/containers/podman/v5/pkg/env/env.go index 8e87834f8..a2165931c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/env/env.go +++ b/vendor/github.com/containers/podman/v5/pkg/env/env.go @@ -8,6 +8,8 @@ import ( "fmt" "os" "strings" + + "golang.org/x/exp/maps" ) const whiteSpaces = " \t" @@ -40,14 +42,9 @@ func Slice(m map[string]string) []string { // map. func Map(slice []string) map[string]string { envmap := make(map[string]string, len(slice)) - for _, val := range slice { - data := strings.SplitN(val, "=", 2) - - if len(data) > 1 { - envmap[data[0]] = data[1] - } else { - envmap[data[0]] = "" - } + for _, line := range slice { + key, val, _ := strings.Cut(line, "=") + envmap[key] = val } return envmap } @@ -55,8 +52,9 @@ func Map(slice []string) map[string]string { // Join joins the two environment maps with override overriding base. 
func Join(base map[string]string, override map[string]string) map[string]string { if len(base) == 0 { - return override + return maps.Clone(override) } + base = maps.Clone(base) for k, v := range override { base[k] = v } @@ -94,26 +92,25 @@ func ParseFile(path string) (_ map[string]string, err error) { } func parseEnv(env map[string]string, line string) error { - data := strings.SplitN(line, "=", 2) + key, val, hasVal := strings.Cut(line, "=") // catch invalid variables such as "=" or "=A" - if data[0] == "" { + if key == "" { return fmt.Errorf("invalid variable: %q", line) } // trim the front of a variable, but nothing else - name := strings.TrimLeft(data[0], whiteSpaces) - if len(data) > 1 { - env[name] = data[1] + name := strings.TrimLeft(key, whiteSpaces) + if hasVal { + env[name] = val } else { - if strings.HasSuffix(name, "*") { - name = strings.TrimSuffix(name, "*") + if name, hasStar := strings.CutSuffix(name, "*"); hasStar { for _, e := range os.Environ() { - part := strings.SplitN(e, "=", 2) - if len(part) < 2 { + envKey, envVal, hasEq := strings.Cut(e, "=") + if !hasEq { continue } - if strings.HasPrefix(part[0], name) { - env[part[0]] = part[1] + if strings.HasPrefix(envKey, name) { + env[envKey] = envVal } } } else if val, ok := os.LookupEnv(name); ok { diff --git a/vendor/github.com/containers/podman/v4/pkg/env/env_unix.go b/vendor/github.com/containers/podman/v5/pkg/env/env_unix.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/env/env_unix.go rename to vendor/github.com/containers/podman/v5/pkg/env/env_unix.go index 690078f33..5e779cd5f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/env/env_unix.go +++ b/vendor/github.com/containers/podman/v5/pkg/env/env_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package env diff --git a/vendor/github.com/containers/podman/v4/pkg/env/env_windows.go b/vendor/github.com/containers/podman/v5/pkg/env/env_windows.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/env/env_windows.go rename to vendor/github.com/containers/podman/v5/pkg/env/env_windows.go diff --git a/vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go b/vendor/github.com/containers/podman/v5/pkg/errorhandling/errorhandling.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/errorhandling/errorhandling.go rename to vendor/github.com/containers/podman/v5/pkg/errorhandling/errorhandling.go diff --git a/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go b/vendor/github.com/containers/podman/v5/pkg/inspect/inspect.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go rename to vendor/github.com/containers/podman/v5/pkg/inspect/inspect.go index 15943858f..26436155b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/inspect/inspect.go +++ b/vendor/github.com/containers/podman/v5/pkg/inspect/inspect.go @@ -4,7 +4,7 @@ import ( "time" "github.com/containers/image/v5/manifest" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/LICENSE b/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/LICENSE similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/api/LICENSE rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/api/LICENSE diff --git 
a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/apps/v1/types.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/apps/v1/types.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/api/apps/v1/types.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/api/apps/v1/types.go index 86e320617..96d070fab 100644 --- a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/apps/v1/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/apps/v1/types.go @@ -17,9 +17,9 @@ limitations under the License. package v1 import ( - v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1" - metav1 "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr" + v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" + metav1 "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/util/intstr" ) const ( diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/annotation_key_constants.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/annotation_key_constants.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/annotation_key_constants.go diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/resource.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/resource.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/resource.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/resource.go index 91aa95f9a..d2dba9f9b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/resource.go +++ b/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/resource.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource" + "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource" ) // Returns string version of ResourceName. diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/types.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/types.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/types.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/types.go index 904e50f18..9b79298a6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/k8s.io/api/core/v1/types.go @@ -17,10 +17,10 @@ limitations under the License. 
package v1 import ( - "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource" - metav1 "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types" - "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr" + "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource" + metav1 "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/types" + "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/util/intstr" ) // Volume represents a named volume in a pod that may be accessed by any container in the pod. @@ -5013,7 +5013,7 @@ type RollingUpdateDaemonSet struct { // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod // on that node is marked deleted. If the old pod becomes unavailable for any // reason (Ready transitions to false, is evicted, or is drained) an updated - // pod is immediatedly created on that node without considering surge limits. + // pod is immediately created on that node without considering surge limits. // Allowing surge implies the possibility that the resources consumed by the // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/LICENSE b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/LICENSE similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/LICENSE rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/LICENSE diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/amount.go diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/math.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/math.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/math.go diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go index 32ef1af08..164febc53 100644 --- a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -87,7 +87,7 @@ import ( // // This format is intended to make it difficult to use these numbers without // writing some sort of special handling code in the hopes that that will -// cause implementors to also use a fixed point implementation. +// cause implementers to also use a fixed point implementation. 
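The long run of rename-plus-import hunks here is mechanical fallout from the major-version bump: under Go modules, `github.com/containers/podman/v5` is a distinct module from `/v4`, so every vendored path and import line changes even where the code does not; the few remaining content edits are upstream comment typo fixes ("immediatedly", "implementors") picked up by re-vendoring. For consumers such as this exporter, the migration is the same one-line rewrite per import. A sketch, using a package this repo already depends on via go.mod:

```go
package main

import (
	"fmt"

	// v4: "github.com/containers/podman/v4/libpod/define"
	"github.com/containers/podman/v5/libpod/define" // the /v5 suffix is part of the module identity
)

func main() {
	// Identifiers keep their names across the bump; only import paths move.
	fmt.Println(define.ErrNoSuchCtr)
}
```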
// // +protobuf=true // +protobuf.embed=string diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/scale_int.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/scale_int.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/scale_int.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/scale_int.go diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/suffix.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/suffix.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource/suffix.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource/suffix.go diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/time.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/time.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/time.go diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 41053090f..cb6838c06 100644 --- a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -30,7 +30,7 @@ import ( "fmt" "strings" - "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types" + "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/types" ) // TypeMeta describes an individual object in an API response or request diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types/uid.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/types/uid.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types/uid.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/types/uid.go diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go 
b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go index a502b5adb..6690a1e6f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go +++ b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go @@ -1,5 +1,4 @@ //go:build !notest -// +build !notest /* Copyright 2020 The Kubernetes Authors. diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/util/intstr/intstr.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr/intstr.go rename to vendor/github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/util/intstr/intstr.go diff --git a/vendor/github.com/containers/podman/v4/pkg/lookup/lookup.go b/vendor/github.com/containers/podman/v5/pkg/lookup/lookup.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/lookup/lookup.go rename to vendor/github.com/containers/podman/v5/pkg/lookup/lookup.go index dae937c7d..4444182a6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/lookup/lookup.go +++ b/vendor/github.com/containers/podman/v5/pkg/lookup/lookup.go @@ -5,7 +5,7 @@ import ( "strconv" securejoin "github.com/cyphar/filepath-securejoin" - "github.com/opencontainers/runc/libcontainer/user" + "github.com/moby/sys/user" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go b/vendor/github.com/containers/podman/v5/pkg/namespaces/namespaces.go similarity index 75% rename from vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go rename to vendor/github.com/containers/podman/v5/pkg/namespaces/namespaces.go index 1ca1f9a2e..3c0622c7d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/namespaces/namespaces.go +++ b/vendor/github.com/containers/podman/v5/pkg/namespaces/namespaces.go @@ -4,8 +4,6 @@ import ( "fmt" "strconv" "strings" - - "github.com/containers/storage/types" ) const ( @@ -50,24 +48,21 @@ func (n CgroupMode) IsNS() bool { // NS gets the path associated with a ns: cgroup ns func (n CgroupMode) NS() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" + _, path, _ := strings.Cut(string(n), ":") + return path } // IsContainer indicates whether the container uses a new cgroup namespace. func (n CgroupMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == containerType + typ, _, hasColon := strings.Cut(string(n), ":") + return hasColon && typ == containerType } // Container returns the name of the container whose cgroup namespace is going to be used. func (n CgroupMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == containerType { - return parts[1] + typ, name, hasName := strings.Cut(string(n), ":") + if hasName && typ == containerType { + return name } return "" } @@ -122,81 +117,39 @@ func (n UsernsMode) IsDefaultValue() bool { return n == "" || n == defaultType } -// GetAutoOptions returns an AutoUserNsOptions with the settings to automatically set up -// a user namespace. 
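The namespaces.go rewrite beginning above is part of a tree-wide sweep replacing `strings.SplitN(s, sep, 2)` with Go 1.18's `strings.Cut`, whose found flag removes the `len(parts) > 1` bookkeeping. For this `mode[:value]` grammar the two forms are equivalent, which a quick standalone check confirms:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, s := range []string{"container:web", "ns:/proc/1/ns/net", "host", "container:"} {
		typ, val, ok := strings.Cut(s, ":")
		// ok reports whether ":" was present; val may still be "" when ok is true,
		// exactly matching the old len(parts) > 1 check on SplitN(s, ":", 2).
		fmt.Printf("%-20q -> typ=%q val=%q found=%v\n", s, typ, val, ok)
	}
}
```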
-func (n UsernsMode) GetAutoOptions() (*types.AutoUserNsOptions, error) { - parts := strings.SplitN(string(n), ":", 2) - if parts[0] != "auto" { - return nil, fmt.Errorf("wrong user namespace mode") - } - options := types.AutoUserNsOptions{} - if len(parts) == 1 { - return &options, nil - } - for _, o := range strings.Split(parts[1], ",") { - v := strings.SplitN(o, "=", 2) - if len(v) != 2 { - return nil, fmt.Errorf("invalid option specified: %q", o) - } - switch v[0] { - case "size": - s, err := strconv.ParseUint(v[1], 10, 32) - if err != nil { - return nil, err - } - options.Size = uint32(s) - case "uidmapping": - mapping, err := types.ParseIDMapping([]string{v[1]}, nil, "", "") - if err != nil { - return nil, err - } - options.AdditionalUIDMappings = append(options.AdditionalUIDMappings, mapping.UIDMap...) - case "gidmapping": - mapping, err := types.ParseIDMapping(nil, []string{v[1]}, "", "") - if err != nil { - return nil, err - } - options.AdditionalGIDMappings = append(options.AdditionalGIDMappings, mapping.GIDMap...) - default: - return nil, fmt.Errorf("unknown option specified: %q", v[0]) - } - } - return &options, nil -} - // GetKeepIDOptions returns a KeepIDUserNsOptions with the settings to keepIDmatically set up // a user namespace. func (n UsernsMode) GetKeepIDOptions() (*KeepIDUserNsOptions, error) { - parts := strings.SplitN(string(n), ":", 2) - if parts[0] != "keep-id" { + nsmode, nsopts, hasOpts := strings.Cut(string(n), ":") + if nsmode != "keep-id" { return nil, fmt.Errorf("wrong user namespace mode") } options := KeepIDUserNsOptions{} - if len(parts) == 1 { + if !hasOpts { return &options, nil } - for _, o := range strings.Split(parts[1], ",") { - v := strings.SplitN(o, "=", 2) - if len(v) != 2 { + for _, o := range strings.Split(nsopts, ",") { + opt, val, hasVal := strings.Cut(o, "=") + if !hasVal { return nil, fmt.Errorf("invalid option specified: %q", o) } - switch v[0] { + switch opt { case "uid": - s, err := strconv.ParseUint(v[1], 10, 32) + s, err := strconv.ParseUint(val, 10, 32) if err != nil { return nil, err } v := uint32(s) options.UID = &v case "gid": - s, err := strconv.ParseUint(v[1], 10, 32) + s, err := strconv.ParseUint(val, 10, 32) if err != nil { return nil, err } v := uint32(s) options.GID = &v default: - return nil, fmt.Errorf("unknown option specified: %q", v[0]) + return nil, fmt.Errorf("unknown option specified: %q", opt) } } return &options, nil @@ -229,24 +182,21 @@ func (n UsernsMode) IsNS() bool { // NS gets the path associated with a ns: userns ns func (n UsernsMode) NS() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" + _, path, _ := strings.Cut(string(n), ":") + return path } // IsContainer indicates whether container uses a container userns. func (n UsernsMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == containerType + typ, _, hasName := strings.Cut(string(n), ":") + return hasName && typ == containerType } // Container is the id of the container which network this container is connected to. func (n UsernsMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == containerType { - return parts[1] + typ, name, hasName := strings.Cut(string(n), ":") + if hasName && typ == containerType { + return name } return "" } @@ -266,15 +216,15 @@ func (n UTSMode) IsHost() bool { // IsContainer indicates whether the container uses a container's UTS namespace. 
func (n UTSMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == containerType + typ, _, hasName := strings.Cut(string(n), ":") + return hasName && typ == containerType } // Container returns the name of the container whose uts namespace is going to be used. func (n UTSMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == containerType { - return parts[1] + typ, name, hasName := strings.Cut(string(n), ":") + if hasName && typ == containerType { + return name } return "" } @@ -314,8 +264,8 @@ func (n IpcMode) IsShareable() bool { // IsContainer indicates whether the container uses another container's ipc namespace. func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == containerType + typ, _, hasName := strings.Cut(string(n), ":") + return hasName && typ == containerType } // IsNone indicates whether container IpcMode is set to "none". @@ -335,9 +285,9 @@ func (n IpcMode) Valid() bool { // Container returns the name of the container ipc stack is going to be used. func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == containerType { - return parts[1] + typ, name, hasName := strings.Cut(string(n), ":") + if hasName && typ == containerType { + return name } return "" } @@ -357,8 +307,8 @@ func (n PidMode) IsHost() bool { // IsContainer indicates whether the container uses a container's pid namespace. func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == containerType + typ, _, hasName := strings.Cut(string(n), ":") + return hasName && typ == containerType } // Valid indicates whether the pid namespace is valid. @@ -378,9 +328,9 @@ func (n PidMode) Valid() bool { // Container returns the name of the container whose pid namespace is going to be used. func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == containerType { - return parts[1] + typ, name, hasName := strings.Cut(string(n), ":") + if hasName && typ == containerType { + return name } return "" } @@ -410,15 +360,15 @@ func (n NetworkMode) IsPrivate() bool { // IsContainer indicates whether container uses a container network stack. func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == containerType + typ, _, hasName := strings.Cut(string(n), ":") + return hasName && typ == containerType } // Container is the id of the container which network this container is connected to. 
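The same accessors (`NS`, `IsContainer`, `Container`) recur nearly verbatim across CgroupMode, UsernsMode, UTSMode, IpcMode, PidMode, and NetworkMode in these hunks. Upstream keeps the per-type copies since each mode is its own string type; purely as an illustration, the shared logic could be factored with a generic helper (hypothetical, not part of the podman API):

```go
package main

import (
	"fmt"
	"strings"
)

const containerType = "container"

// containerTarget reports whether mode has the form "container:<name>"
// and, if so, returns the name. Hypothetical helper; podman repeats this
// logic once per namespace-mode type.
func containerTarget[T ~string](mode T) (string, bool) {
	typ, name, ok := strings.Cut(string(mode), ":")
	if ok && typ == containerType {
		return name, true
	}
	return "", false
}

type PidMode string

func main() {
	name, ok := containerTarget(PidMode("container:db"))
	fmt.Println(name, ok) // db true
}
```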
func (n NetworkMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == containerType { - return parts[1] + typ, name, hasName := strings.Cut(string(n), ":") + if hasName && typ == containerType { + return name } return "" } @@ -453,11 +403,8 @@ func (n NetworkMode) IsNS() bool { // NS gets the path associated with a ns: network ns func (n NetworkMode) NS() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" + _, path, _ := strings.Cut(string(n), ":") + return path } // IsPod returns whether the network refers to pod networking diff --git a/vendor/github.com/containers/podman/v4/pkg/parallel/ctr/ctr.go b/vendor/github.com/containers/podman/v5/pkg/parallel/ctr/ctr.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/parallel/ctr/ctr.go rename to vendor/github.com/containers/podman/v5/pkg/parallel/ctr/ctr.go index 8e73aa5ad..1cdb30f8c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/parallel/ctr/ctr.go +++ b/vendor/github.com/containers/podman/v5/pkg/parallel/ctr/ctr.go @@ -1,13 +1,12 @@ //go:build !remote -// +build !remote package ctr import ( "context" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/pkg/parallel" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/pkg/parallel" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/parallel/parallel.go b/vendor/github.com/containers/podman/v5/pkg/parallel/parallel.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/parallel/parallel.go rename to vendor/github.com/containers/podman/v5/pkg/parallel/parallel.go diff --git a/vendor/github.com/containers/podman/v4/pkg/ps/define/types.go b/vendor/github.com/containers/podman/v5/pkg/ps/define/types.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/ps/define/types.go rename to vendor/github.com/containers/podman/v5/pkg/ps/define/types.go diff --git a/vendor/github.com/containers/podman/v4/pkg/ps/ps.go b/vendor/github.com/containers/podman/v5/pkg/ps/ps.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/ps/ps.go rename to vendor/github.com/containers/podman/v5/pkg/ps/ps.go index ac51c7cf9..a3bb9b9f9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/ps/ps.go +++ b/vendor/github.com/containers/podman/v5/pkg/ps/ps.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package ps @@ -15,11 +14,11 @@ import ( "time" libnetworkTypes "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/domain/filters" - psdefine "github.com/containers/podman/v4/pkg/ps/define" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/domain/filters" + psdefine "github.com/containers/podman/v5/pkg/ps/define" "github.com/containers/storage" "github.com/containers/storage/types" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/podman/v4/pkg/rctl/rctl.go b/vendor/github.com/containers/podman/v5/pkg/rctl/rctl.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/rctl/rctl.go rename to vendor/github.com/containers/podman/v5/pkg/rctl/rctl.go index 
dccf9e7cd..75588c79c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/rctl/rctl.go +++ b/vendor/github.com/containers/podman/v5/pkg/rctl/rctl.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package rctl @@ -32,9 +31,8 @@ func GetRacct(filter string) (map[string]uint64, error) { entries := strings.Split(string(buf[:len]), ",") res := make(map[string]uint64) for _, entry := range entries { - kv := strings.SplitN(entry, "=", 2) - key := kv[0] - val, err := strconv.ParseUint(kv[1], 10, 0) + key, valstr, _ := strings.Cut(entry, "=") + val, err := strconv.ParseUint(valstr, 10, 0) if err != nil { logrus.Warnf("unexpected rctl entry, ignoring: %s", entry) } diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go rename to vendor/github.com/containers/podman/v5/pkg/rootless/rootless.go index 6b9b30f35..e1466e91a 100644 --- a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless.go +++ b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/containers/storage/pkg/lockfile" - "github.com/opencontainers/runc/libcontainer/user" + "github.com/moby/sys/user" spec "github.com/opencontainers/runtime-spec/specs-go" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.c b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_freebsd.c similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.c rename to vendor/github.com/containers/podman/v5/pkg/rootless/rootless_freebsd.c diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_freebsd.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.go rename to vendor/github.com/containers/podman/v5/pkg/rootless/rootless_freebsd.go index 525f789db..2a459398b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_freebsd.go +++ b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd && cgo -// +build freebsd,cgo package rootless diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_linux.c similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c rename to vendor/github.com/containers/podman/v5/pkg/rootless/rootless_linux.c diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_linux.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go rename to vendor/github.com/containers/podman/v5/pkg/rootless/rootless_linux.go index d303c8bd0..40f81301f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_linux.go @@ -1,5 +1,4 @@ //go:build linux && cgo -// +build linux,cgo package rootless @@ -19,7 +18,7 @@ import ( "sync" "unsafe" - "github.com/containers/podman/v4/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/errorhandling" "github.com/containers/storage/pkg/idtools" pmount "github.com/containers/storage/pkg/mount" 
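The swap from `github.com/opencontainers/runc/libcontainer/user` to `github.com/moby/sys/user` in the lookup and rootless hunks tracks upstream: runc deprecated its libcontainer/user package and the code now lives in the moby/sys repository. The import-only nature of these hunks implies the API carried over unchanged; under that assumption, usage looks like:

```go
package main

import (
	"fmt"

	// Previously github.com/opencontainers/runc/libcontainer/user.
	"github.com/moby/sys/user"
)

func main() {
	// LookupUid scans /etc/passwd (or the configured passwd source) for the uid.
	u, err := user.LookupUid(0)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(u.Name, u.Home)
}
```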
"github.com/containers/storage/pkg/unshare" diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_unsupported.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go rename to vendor/github.com/containers/podman/v5/pkg/rootless/rootless_unsupported.go index c2e86fa1d..587fb4cb9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_unsupported.go +++ b/vendor/github.com/containers/podman/v5/pkg/rootless/rootless_unsupported.go @@ -1,5 +1,4 @@ //go:build !(linux || freebsd) || !cgo -// +build !linux,!freebsd !cgo package rootless diff --git a/vendor/github.com/containers/podman/v4/pkg/seccomp/seccomp.go b/vendor/github.com/containers/podman/v5/pkg/seccomp/seccomp.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/seccomp/seccomp.go rename to vendor/github.com/containers/podman/v5/pkg/seccomp/seccomp.go diff --git a/vendor/github.com/containers/podman/v4/pkg/selinux/selinux.go b/vendor/github.com/containers/podman/v5/pkg/selinux/selinux.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/selinux/selinux.go rename to vendor/github.com/containers/podman/v5/pkg/selinux/selinux.go diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go b/vendor/github.com/containers/podman/v5/pkg/signal/signal_common.go similarity index 90% rename from vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go rename to vendor/github.com/containers/podman/v5/pkg/signal/signal_common.go index a81d0461b..106105829 100644 --- a/vendor/github.com/containers/podman/v4/pkg/signal/signal_common.go +++ b/vendor/github.com/containers/podman/v5/pkg/signal/signal_common.go @@ -71,3 +71,11 @@ func ParseSysSignalToName(s syscall.Signal) (string, error) { } return "", fmt.Errorf("unknown syscall signal: %s", s) } + +func ToDockerFormat(s uint) string { + var signalStr, err = ParseSysSignalToName(syscall.Signal(s)) + if err != nil { + return strconv.FormatUint(uint64(s), 10) + } + return fmt.Sprintf("SIG%s", signalStr) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go b/vendor/github.com/containers/podman/v5/pkg/signal/signal_linux.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go rename to vendor/github.com/containers/podman/v5/pkg/signal/signal_linux.go index 81e4ed758..5b07ccdd5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/signal/signal_linux.go @@ -1,5 +1,4 @@ //go:build linux && !mips && !mipsle && !mips64 && !mips64le -// +build linux,!mips,!mipsle,!mips64,!mips64le // Signal handling for Linux only. 
package signal diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/podman/v5/pkg/signal/signal_linux_mipsx.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go rename to vendor/github.com/containers/podman/v5/pkg/signal/signal_linux_mipsx.go index c97eeb23d..f587c09a3 100644 --- a/vendor/github.com/containers/podman/v4/pkg/signal/signal_linux_mipsx.go +++ b/vendor/github.com/containers/podman/v5/pkg/signal/signal_linux_mipsx.go @@ -1,6 +1,4 @@ //go:build linux && (mips || mipsle || mips64 || mips64le) -// +build linux -// +build mips mipsle mips64 mips64le // Special signal handling for mips architecture package signal diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go b/vendor/github.com/containers/podman/v5/pkg/signal/signal_unix.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go rename to vendor/github.com/containers/podman/v5/pkg/signal/signal_unix.go index 01d99d7bc..6e1ed540e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unix.go +++ b/vendor/github.com/containers/podman/v5/pkg/signal/signal_unix.go @@ -1,5 +1,4 @@ //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd netbsd openbsd solaris zos // Signal handling for Linux only. package signal diff --git a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/podman/v5/pkg/signal/signal_unsupported.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go rename to vendor/github.com/containers/podman/v5/pkg/signal/signal_unsupported.go index 590aaf978..96c0c8f99 100644 --- a/vendor/github.com/containers/podman/v4/pkg/signal/signal_unsupported.go +++ b/vendor/github.com/containers/podman/v5/pkg/signal/signal_unsupported.go @@ -1,5 +1,4 @@ //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos // Signal handling for Linux only. 
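A large share of this diff is the deletion of legacy `// +build` comments. Since Go 1.17 the `//go:build` expression is the authoritative constraint and the old lines are redundant duplicates kept only for pre-1.17 toolchains; with this module requiring go 1.20 (and CI on 1.21.4), dropping them is safe. Each affected file is left with a single boolean expression in its header, e.g.:

```go
//go:build linux && !mips && !mipsle && !mips64 && !mips64le

// One //go:build expression replaces the stacked "// +build" lines;
// the blank line before the package clause is still required.
package signal
```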
package signal diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go b/vendor/github.com/containers/podman/v5/pkg/specgen/container_validate.go similarity index 89% rename from vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/container_validate.go index c6a8baddd..f740719af 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/container_validate.go @@ -5,8 +5,8 @@ import ( "fmt" "strings" - "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" + "golang.org/x/exp/slices" ) var ( @@ -31,7 +31,7 @@ func (s *SpecGenerator) Validate() error { if len(s.Networks) > 0 { return fmt.Errorf("networks must be defined when the pod is created: %w", define.ErrNetworkOnPodContainer) } - if len(s.PortMappings) > 0 || s.PublishExposedPorts { + if len(s.PortMappings) > 0 || (s.PublishExposedPorts != nil && *s.PublishExposedPorts) { return fmt.Errorf("published or exposed ports must be defined when the pod is created: %w", define.ErrNetworkOnPodContainer) } if len(s.HostAdd) > 0 { @@ -59,7 +59,7 @@ func (s *SpecGenerator) Validate() error { return fmt.Errorf("cannot set hostname when running in the host UTS namespace: %w", ErrInvalidSpecConfig) } // systemd values must be true, false, or always - if len(s.ContainerBasicConfig.Systemd) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerBasicConfig.Systemd), SystemDValues) { + if len(s.ContainerBasicConfig.Systemd) > 0 && !slices.Contains(SystemDValues, strings.ToLower(s.ContainerBasicConfig.Systemd)) { return fmt.Errorf("--systemd values must be one of %q: %w", strings.Join(SystemDValues, ", "), ErrInvalidSpecConfig) } @@ -75,7 +75,7 @@ func (s *SpecGenerator) Validate() error { return exclusiveOptions("rootfs", "image") } // imagevolumemode must be one of ignore, tmpfs, or anonymous if given - if len(s.ContainerStorageConfig.ImageVolumeMode) > 0 && !util.StringInSlice(strings.ToLower(s.ContainerStorageConfig.ImageVolumeMode), ImageVolumeModeValues) { + if len(s.ContainerStorageConfig.ImageVolumeMode) > 0 && !slices.Contains(ImageVolumeModeValues, strings.ToLower(s.ContainerStorageConfig.ImageVolumeMode)) { return fmt.Errorf("invalid ImageVolumeMode %q, value must be one of %s", s.ContainerStorageConfig.ImageVolumeMode, strings.Join(ImageVolumeModeValues, ",")) } @@ -102,7 +102,7 @@ func (s *SpecGenerator) Validate() error { // ContainerNetworkConfig // // useimageresolveconf conflicts with dnsserver, dnssearch, dnsoption - if s.UseImageResolvConf { + if s.UseImageResolvConf != nil && *s.UseImageResolvConf { if len(s.DNSServers) > 0 { return exclusiveOptions("UseImageResolvConf", "DNSServer") } @@ -114,7 +114,7 @@ func (s *SpecGenerator) Validate() error { } } // UseImageHosts and HostAdd are exclusive - if s.UseImageHosts && len(s.HostAdd) > 0 { + if (s.UseImageHosts != nil && *s.UseImageHosts) && len(s.HostAdd) > 0 { return exclusiveOptions("UseImageHosts", "HostAdd") } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_common.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_common.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_common.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_common.go index 66c84b73c..46953a77d 100644 --- 
a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_common.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_common.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_freebsd.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_freebsd.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_freebsd.go index edae1209b..ec5c8622d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_freebsd.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_linux.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_linux.go index 073755293..4d62c93e0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate @@ -11,9 +10,10 @@ import ( "path/filepath" "strings" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/common/pkg/config" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/util" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" @@ -93,34 +93,14 @@ func DevicesFromPath(g *generate.Generator, devicePath string) error { } func BlockAccessToKernelFilesystems(privileged, pidModeIsHost bool, mask, unmask []string, g *generate.Generator) { - defaultMaskPaths := []string{"/proc/acpi", - "/proc/kcore", - "/proc/keys", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - "/proc/scsi", - "/sys/firmware", - "/sys/fs/selinux", - "/sys/dev/block", - } - if !privileged { - for _, mp := range defaultMaskPaths { + for _, mp := range config.DefaultMaskedPaths { // check that the path to mask is not in the list of paths to unmask if shouldMask(mp, unmask) { g.AddLinuxMaskedPaths(mp) } } - for _, rp := range []string{ - "/proc/asound", - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger", - } { + for _, rp := range config.DefaultReadOnlyPaths { if shouldMask(rp, unmask) { g.AddLinuxReadonlyPaths(rp) } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_seccomp.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_linux_seccomp.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_seccomp.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_linux_seccomp.go index 1b41805ae..04f2a3c3e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_seccomp.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/config_linux_seccomp.go @@ 
-1,5 +1,4 @@ //go:build linux && !remote -// +build linux,!remote package generate @@ -11,8 +10,8 @@ import ( "github.com/containers/common/libimage" goSeccomp "github.com/containers/common/pkg/seccomp" - "github.com/containers/podman/v4/pkg/seccomp" - "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v5/pkg/seccomp" + "github.com/containers/podman/v5/pkg/specgen" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container.go similarity index 94% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/container.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/container.go index 002dd433f..f224453da 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate @@ -15,12 +14,12 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - ann "github.com/containers/podman/v4/pkg/annotations" - envLib "github.com/containers/podman/v4/pkg/env" - "github.com/containers/podman/v4/pkg/signal" - "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + ann "github.com/containers/podman/v5/pkg/annotations" + envLib "github.com/containers/podman/v5/pkg/env" + "github.com/containers/podman/v5/pkg/signal" + "github.com/containers/podman/v5/pkg/specgen" "github.com/openshift/imagebuilder" "github.com/sirupsen/logrus" ) @@ -111,7 +110,15 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat } // Get Default Environment from containers.conf - defaultEnvs, err := envLib.ParseSlice(rtc.GetDefaultEnvEx(s.EnvHost, s.HTTPProxy)) + envHost := false + if s.EnvHost != nil { + envHost = *s.EnvHost + } + httpProxy := false + if s.HTTPProxy != nil { + httpProxy = *s.HTTPProxy + } + defaultEnvs, err := envLib.ParseSlice(rtc.GetDefaultEnvEx(envHost, httpProxy)) if err != nil { return nil, fmt.Errorf("parsing fields in containers.conf: %w", err) } @@ -130,7 +137,7 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat // add default terminal to env if tty flag is set _, ok := defaultEnvs["TERM"] - if s.Terminal && !ok { + if (s.Terminal != nil && *s.Terminal) && !ok { defaultEnvs["TERM"] = "xterm" } @@ -155,7 +162,7 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat delete(defaultEnvs, e) } - if s.UnsetEnvAll { + if s.UnsetEnvAll != nil && *s.UnsetEnvAll { defaultEnvs = make(map[string]string) } // First transform the os env into a map. 
We need it for the labels later in @@ -163,9 +170,9 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat osEnv := envLib.Map(os.Environ()) // Caller Specified defaults - if s.EnvHost { + if envHost { defaultEnvs = envLib.Join(defaultEnvs, osEnv) - } else if s.HTTPProxy { + } else if httpProxy { for _, envSpec := range config.ProxyEnv { if v, ok := osEnv[envSpec]; ok { defaultEnvs[envSpec] = v @@ -237,13 +244,8 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat } } - for _, v := range rtc.Containers.Annotations.Get() { - split := strings.SplitN(v, "=", 2) - k := split[0] - v := "" - if len(split) == 2 { - v = split[1] - } + for _, annotation := range rtc.Containers.Annotations.Get() { + k, v, _ := strings.Cut(annotation, "=") annotations[k] = v } // now pass in the values from client @@ -309,6 +311,10 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat warnings = append(warnings, "Port mappings have been discarded as one of the Host, Container, Pod, and None network modes are in use") } + if len(s.ImageVolumeMode) == 0 { + s.ImageVolumeMode = rtc.Engine.ImageVolumeMode + } + return warnings, nil } @@ -357,9 +363,9 @@ func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, containerID if conf.Spec.Process != nil && conf.Spec.Process.Env != nil { env := make(map[string]string) for _, entry := range conf.Spec.Process.Env { - split := strings.SplitN(entry, "=", 2) - if len(split) == 2 { - env[split[0]] = split[1] + key, val, hasVal := strings.Cut(entry, "=") + if hasVal { + env[key] = val } } specg.Env = env @@ -503,7 +509,7 @@ func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, containerID // mapSecurityConfig takes a libpod.ContainerSecurityConfig and converts it to a specgen.ContinerSecurityConfig func mapSecurityConfig(c *libpod.ContainerConfig, s *specgen.SpecGenerator) { - s.Privileged = c.Privileged + s.Privileged = &c.Privileged s.SelinuxOpts = append(s.SelinuxOpts, c.LabelOpts...) 
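The recurring churn from `s.Flag` to `s.Flag != nil && *s.Flag` (and `s.Privileged = &c.Privileged` just above) reflects a podman v5 API break: many SpecGenerator booleans became `*bool`, so a nil pointer can mean "unset" as distinct from "explicitly false", with accessors like the `s.IsPrivileged()` seen later in this diff wrapping the nil check. Callers now dereference defensively; a hypothetical helper (not a podman API) shows the pattern the hunks expand inline:

```go
package main

import "fmt"

// derefOr returns *p, or def when p is nil: the pattern the hunks above
// spell out for envHost, httpProxy, rootfsOverlay, and friends.
func derefOr(p *bool, def bool) bool {
	if p == nil {
		return def
	}
	return *p
}

type specGenerator struct {
	Terminal *bool // nil = unset, distinct from an explicit false
}

func main() {
	var s specGenerator
	fmt.Println(derefOr(s.Terminal, false)) // false: unset falls back to the default

	t := true
	s.Terminal = &t
	fmt.Println(derefOr(s.Terminal, false)) // true
}
```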
s.User = c.User s.Groups = c.Groups diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container_create.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container_create.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/container_create.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/container_create.go index 940fefae0..db4f8cdb6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container_create.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/container_create.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate @@ -15,13 +14,13 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/libnetwork/pasta" "github.com/containers/common/libnetwork/slirp4netns" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/namespaces" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/specgenutil" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/namespaces" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/specgenutil" + "github.com/containers/podman/v5/pkg/util" "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" @@ -55,12 +54,12 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener s.ResourceLimits.Unified = make(map[string]string) } for _, cgroupConf := range rtc.Containers.CgroupConf.Get() { - cgr := strings.SplitN(cgroupConf, "=", 2) - if len(cgr) != 2 { - return nil, nil, nil, fmt.Errorf("CgroupConf %q from containers.conf invalid, must be name=value", cgr) + key, val, hasVal := strings.Cut(cgroupConf, "=") + if !hasVal { + return nil, nil, nil, fmt.Errorf("CgroupConf %s from containers.conf invalid, must be name=value", cgroupConf) } - if _, ok := s.ResourceLimits.Unified[cgr[0]]; !ok { - s.ResourceLimits.Unified[cgr[0]] = cgr[1] + if _, ok := s.ResourceLimits.Unified[key]; !ok { + s.ResourceLimits.Unified[key] = val } } } @@ -153,7 +152,11 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener } if s.Rootfs != "" { - options = append(options, libpod.WithRootFS(s.Rootfs, s.RootfsOverlay, s.RootfsMapping)) + rootfsOverlay := false + if s.RootfsOverlay != nil { + rootfsOverlay = *s.RootfsOverlay + } + options = append(options, libpod.WithRootFS(s.Rootfs, rootfsOverlay, s.RootfsMapping)) } newImage, resolvedImageName, imageData, err := getImageFromSpec(ctx, rt, s) @@ -227,10 +230,7 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener options = append(options, libpod.WithHostUsers(s.HostUsers)) } - command, err := makeCommand(s, imageData) - if err != nil { - return nil, nil, nil, err - } + command := makeCommand(s, imageData) infraVol := len(compatibleOptions.Mounts) > 0 || len(compatibleOptions.Volumes) > 0 || len(compatibleOptions.ImageVolumes) > 0 || len(compatibleOptions.OverlayVolumes) > 0 opts, err := createContainerOptions(rt, s, pod, finalVolumes, finalOverlays, imageData, command, infraVol, *compatibleOptions) @@ -251,6 +251,9 @@ func MakeContainer(ctx 
context.Context, rt *libpod.Runtime, s *specgen.SpecGener options = append(options, opts...) } runtimeSpec, err := SpecGenToOCI(ctx, s, rt, rtc, newImage, finalMounts, pod, command, compatibleOptions) + if err != nil { + return nil, nil, nil, err + } if clone { // the container fails to start if cloned due to missing Linux spec entries if c == nil { return nil, nil, nil, errors.New("the given container could not be retrieved") @@ -355,7 +358,11 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l options = append(options, libpod.WithPreserveFDs(s.PreserveFDs)) } - if s.Stdin { + if s.PreserveFD != nil { + options = append(options, libpod.WithPreserveFD(s.PreserveFD)) + } + + if s.Stdin != nil && *s.Stdin { options = append(options, libpod.WithStdin()) } @@ -365,7 +372,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l if s.Umask != "" { options = append(options, libpod.WithUmask(s.Umask)) } - if s.Volatile { + if s.Volatile != nil && *s.Volatile { options = append(options, libpod.WithVolatile()) } if s.PasswdEntry != "" { @@ -374,8 +381,11 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l if s.GroupEntry != "" { options = append(options, libpod.WithGroupEntry(s.GroupEntry)) } + if s.BaseHostsFile != "" { + options = append(options, libpod.WithBaseHostsFile(s.BaseHostsFile)) + } - if s.Privileged { + if s.IsPrivileged() { options = append(options, libpod.WithMountAllDevices()) } @@ -514,7 +524,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l if s.WorkDir == "" { s.WorkDir = "/" } - if s.CreateWorkingDir { + if s.CreateWorkingDir != nil && *s.CreateWorkingDir { options = append(options, libpod.WithCreateWorkingDir()) } if s.StopSignal != nil { @@ -541,8 +551,8 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l options = append(options, libpod.WithLogDriver(s.LogConfiguration.Driver)) } } - if s.ContainerSecurityConfig.LabelNested { - options = append(options, libpod.WithLabelNested(s.ContainerSecurityConfig.LabelNested)) + if s.LabelNested != nil { + options = append(options, libpod.WithLabelNested(*s.LabelNested)) } // Security options if len(s.SelinuxOpts) > 0 { @@ -561,8 +571,10 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l options = append(options, libpod.WithSecLabels(selinuxOpts)) } } - options = append(options, libpod.WithPrivileged(s.Privileged)) - options = append(options, libpod.WithReadWriteTmpfs(s.ReadWriteTmpfs)) + options = append(options, libpod.WithPrivileged(s.IsPrivileged())) + if s.ReadWriteTmpfs != nil { + options = append(options, libpod.WithReadWriteTmpfs(*s.ReadWriteTmpfs)) + } // Get namespace related options namespaceOpts, err := namespaceOptions(s, rt, pod, imageData) @@ -582,7 +594,11 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l options = append(options, libpod.WithShmSizeSystemd(*s.ShmSizeSystemd)) } if s.Rootfs != "" { - options = append(options, libpod.WithRootFS(s.Rootfs, s.RootfsOverlay, s.RootfsMapping)) + rootfsOverlay := false + if s.RootfsOverlay != nil { + rootfsOverlay = *s.RootfsOverlay + } + options = append(options, libpod.WithRootFS(s.Rootfs, rootfsOverlay, s.RootfsMapping)) } // Default used if not overridden on command line diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/kube.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/kube.go similarity index 94% 
rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/kube.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/kube.go index cebf3e5d1..25afd00f6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/kube.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/kube.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package kube @@ -22,19 +21,18 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/parse" "github.com/containers/common/pkg/secrets" - cutil "github.com/containers/common/pkg/util" "github.com/containers/image/v5/manifest" itypes "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/libpod/define" - ann "github.com/containers/podman/v4/pkg/annotations" - "github.com/containers/podman/v4/pkg/domain/entities" - v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1" - "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource" - "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/specgen/generate" - systemdDefine "github.com/containers/podman/v4/pkg/systemd/define" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + ann "github.com/containers/podman/v5/pkg/annotations" + "github.com/containers/podman/v5/pkg/domain/entities" + v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" + "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource" + "github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/util/intstr" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/specgen/generate" + systemdDefine "github.com/containers/podman/v5/pkg/systemd/define" + "github.com/containers/podman/v5/pkg/util" "github.com/docker/docker/pkg/meminfo" "github.com/docker/go-units" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -142,6 +140,8 @@ type CtrSpecGenOptions struct { IpcNSIsHost bool // Volumes for all containers Volumes map[string]*KubeVolume + // VolumesFrom for all containers + VolumesFrom []string // PodID of the parent pod PodID string // PodName of the parent pod @@ -184,6 +184,8 @@ type CtrSpecGenOptions struct { } func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGenerator, error) { + localTrue := true + s := specgen.NewSpecGenerator(opts.Container.Image, false) rtc, err := config.Default() @@ -213,7 +215,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener s.Name = fmt.Sprintf("%s-%s", opts.PodName, opts.Container.Name) - s.Terminal = opts.Container.TTY + s.Terminal = &opts.Container.TTY s.Pod = opts.PodID @@ -223,29 +225,29 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener s.LogConfiguration.Options = make(map[string]string) for _, o := range opts.LogOptions { - split := strings.SplitN(o, "=", 2) - if len(split) < 2 { + opt, val, hasVal := strings.Cut(o, "=") + if !hasVal { return nil, fmt.Errorf("invalid log option %q", o) } - switch strings.ToLower(split[0]) { + switch strings.ToLower(opt) { case "driver": - s.LogConfiguration.Driver = split[1] + s.LogConfiguration.Driver = val case "path": - s.LogConfiguration.Path = split[1] + s.LogConfiguration.Path = val case "max-size": - logSize, err := units.FromHumanSize(split[1]) + logSize, err := units.FromHumanSize(val) if err != nil { return nil, err } 
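The kube.go log-option loop being rewritten here combines the same `strings.Cut` conversion with `github.com/docker/go-units` (already imported by these hunks) for the human-readable `max-size` value. A standalone sketch of that branch:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/go-units"
)

func main() {
	opt := "max-size=10mb"
	key, val, ok := strings.Cut(opt, "=")
	if !ok {
		panic(fmt.Sprintf("invalid log option %q", opt))
	}
	if strings.ToLower(key) == "max-size" {
		// FromHumanSize parses decimal (SI) sizes: "10mb" is 10,000,000 bytes.
		size, err := units.FromHumanSize(val)
		if err != nil {
			panic(err)
		}
		fmt.Println(size)
	}
}
```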
s.LogConfiguration.Size = logSize default: - switch len(split[1]) { + switch len(val) { case 0: return nil, fmt.Errorf("invalid log option: %w", define.ErrInvalidArg) default: // tags for journald only if s.LogConfiguration.Driver == "" || s.LogConfiguration.Driver == define.JournaldLogging { - s.LogConfiguration.Options[split[0]] = split[1] + s.LogConfiguration.Options[opt] = val } else { logrus.Warnf("Can only set tags with journald log driver but driver is %q", s.LogConfiguration.Driver) } @@ -392,7 +394,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener if label, ok := opts.Annotations[define.InspectAnnotationLabel+"/"+opts.Container.Name]; ok { if label == "nested" { - s.ContainerSecurityConfig.LabelNested = true + s.ContainerSecurityConfig.LabelNested = &localTrue } if !slices.Contains(s.ContainerSecurityConfig.SelinuxOpts, label) { s.ContainerSecurityConfig.SelinuxOpts = append(s.ContainerSecurityConfig.SelinuxOpts, label) @@ -405,7 +407,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener if err != nil { return nil, err } - s.Remove = autoremoveAsBool + s.Remove = &autoremoveAsBool s.Annotations[define.InspectAnnotationAutoremove] = autoremove } @@ -415,7 +417,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener return nil, err } - s.Init = initAsBool + s.Init = &initAsBool s.Annotations[define.InspectAnnotationInit] = init } @@ -425,17 +427,19 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener if err != nil { return nil, err } - s.PublishExposedPorts = publishAllAsBool + s.PublishExposedPorts = &publishAllAsBool } s.Annotations[define.InspectAnnotationPublishAll] = publishAll } + s.Annotations[define.KubeHealthCheckAnnotation] = "true" + // Environment Variables envs := map[string]string{} for _, env := range imageData.Config.Env { - keyval := strings.SplitN(env, "=", 2) - envs[keyval[0]] = keyval[1] + key, val, _ := strings.Cut(env, "=") + envs[key] = val } for _, env := range opts.Container.Env { @@ -486,7 +490,7 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener // Make sure the z/Z option is not already there (from editing the YAML) if k == define.BindMountPrefix { lastIndex := strings.LastIndex(v, ":") - if v[:lastIndex] == volumeSource.Source && !cutil.StringInSlice("z", options) && !cutil.StringInSlice("Z", options) { + if lastIndex != -1 && v[:lastIndex] == volumeSource.Source && !slices.Contains(options, "z") && !slices.Contains(options, "Z") { options = append(options, v[lastIndex+1:]) } } @@ -552,11 +556,20 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener SubPath: volume.SubPath, } s.Volumes = append(s.Volumes, &emptyDirVolume) + case KubeVolumeTypeEmptyDirTmpfs: + memVolume := spec.Mount{ + Destination: volume.MountPath, + Type: define.TypeTmpfs, + Source: define.TypeTmpfs, + } + s.Mounts = append(s.Mounts, memVolume) default: return nil, errors.New("unsupported volume source type") } } + s.VolumesFrom = opts.VolumesFrom + s.RestartPolicy = opts.RestartPolicy if opts.NetNSIsHost { @@ -589,10 +602,12 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener } if ro := opts.ReadOnly; ro != itypes.OptionalBoolUndefined { - s.ReadOnlyFilesystem = ro == itypes.OptionalBoolTrue + localRO := ro == itypes.OptionalBoolTrue + s.ReadOnlyFilesystem = &localRO } // This should default to true for kubernetes yaml - s.ReadWriteTmpfs = true + + s.ReadWriteTmpfs 
= &localTrue // Make sure the container runs in a systemd unit which is // stored as a label at container creation. @@ -819,14 +834,15 @@ func setupSecurityContext(s *specgen.SpecGenerator, securityContext *v1.Security } if securityContext.ReadOnlyRootFilesystem != nil { - s.ReadOnlyFilesystem = *securityContext.ReadOnlyRootFilesystem + s.ReadOnlyFilesystem = securityContext.ReadOnlyRootFilesystem } if securityContext.Privileged != nil { - s.Privileged = *securityContext.Privileged + s.Privileged = securityContext.Privileged } if securityContext.AllowPrivilegeEscalation != nil { - s.NoNewPrivileges = !*securityContext.AllowPrivilegeEscalation + localNNP := !*securityContext.AllowPrivilegeEscalation + s.NoNewPrivileges = &localNNP } if securityContext.ProcMount != nil && *securityContext.ProcMount == v1.UnmaskedProcMount { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/seccomp.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/seccomp.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/seccomp.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/seccomp.go index f2eb83757..22659b082 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/seccomp.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/seccomp.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package kube @@ -8,8 +7,8 @@ import ( "path/filepath" "strings" - "github.com/containers/podman/v4/libpod" - v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1" + "github.com/containers/podman/v5/libpod" + v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" ) // KubeSeccompPaths holds information about a pod YAML's seccomp configuration diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/volume.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/volume.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/volume.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/volume.go index 326a69136..7229b2471 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/volume.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/kube/volume.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package kube @@ -10,8 +9,8 @@ import ( "github.com/containers/common/pkg/parse" "github.com/containers/common/pkg/secrets" - "github.com/containers/podman/v4/libpod" - v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1" + "github.com/containers/podman/v5/libpod" + v1 "github.com/containers/podman/v5/pkg/k8s.io/api/core/v1" "github.com/sirupsen/logrus" "sigs.k8s.io/yaml" @@ -35,6 +34,7 @@ const ( KubeVolumeTypeCharDevice KubeVolumeTypeSecret KubeVolumeTypeEmptyDir + KubeVolumeTypeEmptyDirTmpfs ) //nolint:revive @@ -264,7 +264,17 @@ func VolumeFromConfigMap(configMapVolumeSource *v1.ConfigMapVolumeSource, config // Create a kubeVolume for an emptyDir volume func VolumeFromEmptyDir(emptyDirVolumeSource *v1.EmptyDirVolumeSource, name string) (*KubeVolume, error) { - return &KubeVolume{Type: KubeVolumeTypeEmptyDir, Source: name}, nil + if emptyDirVolumeSource.Medium == v1.StorageMediumMemory { + return &KubeVolume{ + Type: KubeVolumeTypeEmptyDirTmpfs, + Source: name, + }, nil + } else { + return &KubeVolume{ + Type: KubeVolumeTypeEmptyDir, + Source: name, + }, nil + } } // Create a KubeVolume from one of the supported 
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go
similarity index 96%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go
index f8c827c62..8b8338dc0 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package generate
 
@@ -10,12 +9,12 @@ import (
 	"github.com/containers/common/libimage"
 	"github.com/containers/common/libnetwork/types"
 	"github.com/containers/common/pkg/config"
-	"github.com/containers/podman/v4/libpod"
-	"github.com/containers/podman/v4/libpod/define"
-	"github.com/containers/podman/v4/pkg/namespaces"
-	"github.com/containers/podman/v4/pkg/rootless"
-	"github.com/containers/podman/v4/pkg/specgen"
-	"github.com/containers/podman/v4/pkg/util"
+	"github.com/containers/podman/v5/libpod"
+	"github.com/containers/podman/v5/libpod/define"
+	"github.com/containers/podman/v5/pkg/namespaces"
+	"github.com/containers/podman/v5/pkg/rootless"
+	"github.com/containers/podman/v5/pkg/specgen"
+	"github.com/containers/podman/v5/pkg/util"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
 )
@@ -91,7 +90,7 @@ func GetDefaultNamespaceMode(nsType string, cfg *config.Config, pod *libpod.Pod)
 	case "cgroup":
 		return specgen.ParseCgroupNamespace(cfg.Containers.CgroupNS)
 	case "net":
-		ns, _, _, err := specgen.ParseNetworkFlag(nil, false)
+		ns, _, _, err := specgen.ParseNetworkFlag(nil)
 		return ns, err
 	}
@@ -296,7 +295,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
 		toReturn = append(toReturn, libpod.WithCgroupsMode(s.CgroupsMode))
 	}
 
-	postConfigureNetNS := !s.UserNS.IsHost()
+	postConfigureNetNS := needPostConfigureNetNS(s)
 
 	switch s.NetNS.NSMode {
 	case specgen.FromPod:
@@ -368,7 +367,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
 		toReturn = append(toReturn, libpod.WithNetNS(portMappings, expose, postConfigureNetNS, "bridge", s.Networks))
 	}
 
-	if s.UseImageHosts {
+	if s.UseImageHosts != nil && *s.UseImageHosts {
 		toReturn = append(toReturn, libpod.WithUseImageHosts())
 	} else if len(s.HostAdd) > 0 {
 		toReturn = append(toReturn, libpod.WithHosts(s.HostAdd))
@@ -376,7 +375,7 @@ func namespaceOptions(s *specgen.SpecGenerator, rt *libpod.Runtime, pod *libpod.
 	if len(s.DNSSearch) > 0 {
 		toReturn = append(toReturn, libpod.WithDNSSearch(s.DNSSearch))
 	}
-	if s.UseImageResolvConf {
+	if s.UseImageResolvConf != nil && *s.UseImageResolvConf {
 		toReturn = append(toReturn, libpod.WithUseImageResolvConf())
 	} else if len(s.DNSServers) > 0 {
 		var dnsServers []string
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces_freebsd.go
similarity index 76%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_freebsd.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces_freebsd.go
index 4fb6a4c51..0230bb3a8 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_freebsd.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces_freebsd.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package generate
 
@@ -7,8 +6,9 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/containers/podman/v4/libpod"
-	"github.com/containers/podman/v4/pkg/specgen"
+	"github.com/containers/buildah/pkg/jail"
+	"github.com/containers/podman/v5/libpod"
+	"github.com/containers/podman/v5/pkg/specgen"
 	"github.com/opencontainers/runtime-tools/generate"
 	"github.com/sirupsen/logrus"
 )
@@ -52,3 +52,10 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt
 
 	return nil
 }
+
+// On FreeBSD 13.3 and later, we can avoid creating a separate vnet jail but
+// only if we can initialise the network after the OCI container is created -
+// the OCI container will own the vnet in this case.
+func needPostConfigureNetNS(s *specgen.SpecGenerator) bool {
+	return !jail.NeedVnetJail()
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_linux.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces_linux.go
similarity index 93%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_linux.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces_linux.go
index 1ff539ac4..fb22b12b1 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_linux.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/namespaces_linux.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package generate
 
@@ -7,9 +6,9 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/containers/podman/v4/libpod"
-	"github.com/containers/podman/v4/libpod/define"
-	"github.com/containers/podman/v4/pkg/specgen"
+	"github.com/containers/podman/v5/libpod"
+	"github.com/containers/podman/v5/libpod/define"
+	"github.com/containers/podman/v5/pkg/specgen"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/runtime-tools/generate"
 	"github.com/sirupsen/logrus"
@@ -153,9 +152,13 @@ func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt
 	if g.Config.Annotations == nil {
 		g.Config.Annotations = make(map[string]string)
 	}
-	if s.PublishExposedPorts {
+	if s.PublishExposedPorts != nil && *s.PublishExposedPorts {
 		g.Config.Annotations[define.InspectAnnotationPublishAll] = define.InspectResponseTrue
 	}
 
 	return nil
 }
+
+func needPostConfigureNetNS(s *specgen.SpecGenerator) bool {
+	return !s.UserNS.IsHost()
+}
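// How the needPostConfigureNetNS split above works, sketched under the
// assumption that only the file suffix differs: each OS gets its own
// implementation, chosen at build time by the _linux/_freebsd file name,
// while the //go:build line merely excludes the remote-client build.
//
//	// namespaces_linux.go
//	func needPostConfigureNetNS(s *specgen.SpecGenerator) bool {
//		return !s.UserNS.IsHost()
//	}
//
//	// namespaces_freebsd.go
//	func needPostConfigureNetNS(s *specgen.SpecGenerator) bool {
//		return !jail.NeedVnetJail()
//	}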
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/oci.go
similarity index 78%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/oci.go
index c5f643308..5cec103fb 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/oci.go
@@ -1,16 +1,15 @@
 //go:build !remote
-// +build !remote
 
 package generate
 
 import (
-	"fmt"
 	"strings"
 
 	"github.com/containers/common/libimage"
-	"github.com/containers/podman/v4/libpod/define"
-	"github.com/containers/podman/v4/pkg/specgen"
+	"github.com/containers/podman/v5/libpod/define"
+	"github.com/containers/podman/v5/pkg/specgen"
 	"github.com/opencontainers/runtime-tools/generate"
+	"github.com/sirupsen/logrus"
 )
 
 func addRlimits(s *specgen.SpecGenerator, g *generate.Generator) {
@@ -24,7 +23,7 @@ func addRlimits(s *specgen.SpecGenerator, g *generate.Generator) {
 }
 
 // Produce the final command for the container.
-func makeCommand(s *specgen.SpecGenerator, imageData *libimage.ImageData) ([]string, error) {
+func makeCommand(s *specgen.SpecGenerator, imageData *libimage.ImageData) []string {
 	finalCommand := []string{}
 
 	entrypoint := s.Entrypoint
@@ -47,13 +46,14 @@ func makeCommand(s *specgen.SpecGenerator, imageData *libimage.ImageData) ([]str
 	finalCommand = append(finalCommand, command...)
 
 	if len(finalCommand) == 0 {
-		return nil, fmt.Errorf("no command or entrypoint provided, and no CMD or ENTRYPOINT from image")
+		logrus.Debug("no command or entrypoint provided, and no CMD or ENTRYPOINT from image: defaulting to empty string")
+		finalCommand = []string{""}
 	}
 
-	if s.Init {
+	if s.Init != nil && *s.Init {
 		// bind mount for this binary is added in addContainerInitBinary()
 		finalCommand = append([]string{define.ContainerInitPath, "--"}, finalCommand...)
 	}
 
-	return finalCommand, nil
+	return finalCommand
 }
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/oci_freebsd.go
similarity index 90%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_freebsd.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/oci_freebsd.go
index bccb46193..2aef6ad5f 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_freebsd.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/oci_freebsd.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package generate
 
@@ -10,9 +9,9 @@ import (
 	"github.com/containers/common/libimage"
 	"github.com/containers/common/pkg/config"
-	"github.com/containers/podman/v4/libpod"
-	"github.com/containers/podman/v4/libpod/define"
-	"github.com/containers/podman/v4/pkg/specgen"
+	"github.com/containers/podman/v5/libpod"
+	"github.com/containers/podman/v5/libpod/define"
+	"github.com/containers/podman/v5/pkg/specgen"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/runtime-tools/generate"
@@ -44,7 +43,9 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
 
 	g.SetProcessArgs(finalCmd)
 
-	g.SetProcessTerminal(s.Terminal)
+	if s.Terminal != nil {
+		g.SetProcessTerminal(*s.Terminal)
+	}
 
 	for key, val := range s.Annotations {
 		g.AddAnnotation(key, val)
@@ -52,7 +53,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
 
 	// Devices
 	var userDevices []spec.LinuxDevice
-	if !s.Privileged {
+	if !s.IsPrivileged() {
 		// add default devices from containers.conf
 		for _, device := range rtc.Containers.Devices.Get() {
 			if err = DevicesFromPath(&g, device); err != nil {
@@ -146,19 +147,19 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt
 		configSpec.Annotations = make(map[string]string)
 	}
 
-	if s.Remove {
+	if s.Remove != nil && *s.Remove {
 		configSpec.Annotations[define.InspectAnnotationAutoremove] = define.InspectResponseTrue
 	}
 
 	if len(s.VolumesFrom) > 0 {
-		configSpec.Annotations[define.InspectAnnotationVolumesFrom] = strings.Join(s.VolumesFrom, ",")
+		configSpec.Annotations[define.VolumesFromAnnotation] = strings.Join(s.VolumesFrom, ";")
 	}
 
-	if s.Privileged {
+	if s.IsPrivileged() {
 		configSpec.Annotations[define.InspectAnnotationPrivileged] = define.InspectResponseTrue
 	}
 
-	if s.Init {
+	if s.Init != nil && *s.Init {
 		configSpec.Annotations[define.InspectAnnotationInit] = define.InspectResponseTrue
 	}
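// A hedged note on the annotation change above: the key moves from
// InspectAnnotationVolumesFrom to VolumesFromAnnotation and the separator
// from "," to ";", presumably because a single volumes-from entry can itself
// carry a comma-separated option list. Illustrative values:
ctrs := []string{"builder:ro,z", "cache"}
configSpec.Annotations[define.VolumesFromAnnotation] = strings.Join(ctrs, ";")
// yields "builder:ro,z;cache", which can later be split on ";" unambiguously.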
"github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/specgen" "github.com/docker/go-units" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" @@ -101,7 +100,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt canMountSys := canMountSys(isRootless, isNewUserns, s) - if s.Privileged && canMountSys { + if s.IsPrivileged() && canMountSys { cgroupPerm = "rw" g.RemoveMount("/sys") sysMnt := spec.Mount{ @@ -116,7 +115,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt addCgroup = false g.RemoveMount("/sys") r := "ro" - if s.Privileged { + if s.IsPrivileged() { r = "rw" } sysMnt := spec.Mount{ @@ -135,7 +134,7 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt Options: []string{"rprivate", "nosuid", "noexec", "nodev", r}, } g.AddMount(sysFsCgroupMnt) - if !s.Privileged && isRootless { + if !s.IsPrivileged() && isRootless { g.AddLinuxMaskedPaths("/sys/kernel") } } @@ -212,7 +211,9 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt g.SetProcessArgs(finalCmd) - g.SetProcessTerminal(s.Terminal) + if s.Terminal != nil { + g.SetProcessTerminal(*s.Terminal) + } for key, val := range s.Annotations { g.AddAnnotation(key, val) @@ -248,13 +249,13 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt // Devices // set the default rule at the beginning of device configuration - if !inUserNS && !s.Privileged { + if !inUserNS && !s.IsPrivileged() { g.AddLinuxResourcesDevice(false, "", nil, nil, "rwm") } var userDevices []spec.LinuxDevice - if !s.Privileged { + if !s.IsPrivileged() { // add default devices from containers.conf for _, device := range rtc.Containers.Devices.Get() { if err = DevicesFromPath(&g, device); err != nil { @@ -279,13 +280,13 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt if isRootless && len(s.DeviceCgroupRule) > 0 { return nil, fmt.Errorf("device cgroup rules are not supported in rootless mode or in a user namespace") } - if !isRootless && !s.Privileged { + if !isRootless && !s.IsPrivileged() { for _, dev := range s.DeviceCgroupRule { g.AddLinuxResourcesDevice(true, dev.Type, dev.Major, dev.Minor, dev.Access) } } - BlockAccessToKernelFilesystems(s.Privileged, s.PidNS.IsHost(), s.Mask, s.Unmask, &g) + BlockAccessToKernelFilesystems(s.IsPrivileged(), s.PidNS.IsHost(), s.Mask, s.Unmask, &g) g.ClearProcessEnv() for name, val := range s.Env { @@ -316,19 +317,19 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt configSpec.Annotations = make(map[string]string) } - if s.Remove { + if s.Remove != nil && *s.Remove { configSpec.Annotations[define.InspectAnnotationAutoremove] = define.InspectResponseTrue } if len(s.VolumesFrom) > 0 { - configSpec.Annotations[define.InspectAnnotationVolumesFrom] = strings.Join(s.VolumesFrom, ",") + configSpec.Annotations[define.VolumesFromAnnotation] = strings.Join(s.VolumesFrom, ";") } - if s.Privileged { + if s.IsPrivileged() { configSpec.Annotations[define.InspectAnnotationPrivileged] = define.InspectResponseTrue } - if s.Init { + if s.Init != nil && *s.Init { configSpec.Annotations[define.InspectAnnotationInit] = define.InspectResponseTrue } @@ -337,7 +338,15 @@ func SpecGenToOCI(ctx context.Context, s 
*specgen.SpecGenerator, rt *libpod.Runt } setProcOpts(s, &g) - if s.ReadOnlyFilesystem && !s.ReadWriteTmpfs { + roFS := false + if s.ReadOnlyFilesystem != nil { + roFS = *s.ReadOnlyFilesystem + } + rwTmpfs := false + if s.ReadWriteTmpfs != nil { + rwTmpfs = *s.ReadWriteTmpfs + } + if roFS && !rwTmpfs { setDevOptsReadOnly(&g) } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pause_image.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/pause_image.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/pause_image.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/pause_image.go index 9e131625b..ffd398dec 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pause_image.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/pause_image.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate @@ -10,8 +9,8 @@ import ( buildahDefine "github.com/containers/buildah/define" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" ) // PullOrBuildInfraImage pulls down the specified image or the one set in diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pod_create.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/pod_create.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/pod_create.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/pod_create.go index f09bcb385..79348aec8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pod_create.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/pod_create.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate @@ -12,11 +11,11 @@ import ( "strconv" "strings" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/specgenutil" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/specgenutil" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -251,7 +250,8 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { spec.DNSSearch = p.DNSSearch } if p.NoManageResolvConf { - spec.UseImageResolvConf = true + localTrue := true + spec.UseImageResolvConf = &localTrue } if len(p.Networks) > 0 { spec.Networks = p.Networks @@ -261,7 +261,7 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { spec.CNINetworks = p.CNINetworks } if p.NoManageHosts { - spec.UseImageHosts = p.NoManageHosts + spec.UseImageHosts = &p.NoManageHosts } if len(p.InfraConmonPidFile) > 0 { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/ports.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/ports.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/ports.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/ports.go index ddf7707f0..0218050e0 100644 --- 
a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/ports.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/ports.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate @@ -11,12 +10,11 @@ import ( "github.com/containers/common/libimage" "github.com/containers/common/libnetwork/types" - "github.com/containers/podman/v4/utils" - - "github.com/containers/common/pkg/util" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/specgenutil" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/specgenutil" + "github.com/containers/podman/v5/utils" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) const ( @@ -333,7 +331,7 @@ func ParsePortMapping(portMappings []types.PortMapping, exposePorts map[uint16][ func appendProtocolsNoDuplicates(slice []string, protocols []string) []string { for _, proto := range protocols { - if util.StringInSlice(proto, slice) { + if slices.Contains(slice, proto) { continue } slice = append(slice, proto) @@ -367,7 +365,7 @@ func createPortMappings(s *specgen.SpecGenerator, imageData *libimage.ImageData) } publishPorts := toExpose - if !s.PublishExposedPorts { + if s.PublishExposedPorts == nil || !*s.PublishExposedPorts { publishPorts = nil } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/security_freebsd.go similarity index 80% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_freebsd.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/security_freebsd.go index 746fb3d2c..948acd3f5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_freebsd.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/security_freebsd.go @@ -1,13 +1,12 @@ //go:build !remote -// +build !remote package generate import ( "github.com/containers/common/libimage" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/pkg/specgen" "github.com/opencontainers/runtime-tools/generate" ) @@ -19,7 +18,7 @@ func setLabelOpts(s *specgen.SpecGenerator, runtime *libpod.Runtime, pidConfig s func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, newImage *libimage.Image, rtc *config.Config) error { // If this is a privileged container, change the devfs ruleset to expose all devices. 
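// A self-contained sketch of the helper migration repeated throughout this
// patch: containers/common's util.StringInSlice(needle, haystack) becomes
// golang.org/x/exp/slices.Contains(haystack, needle); note the swapped
// argument order. appendUnique is a hypothetical stand-in for functions like
// appendProtocolsNoDuplicates above:
import "golang.org/x/exp/slices"

func appendUnique(dst, items []string) []string {
	for _, it := range items {
		if !slices.Contains(dst, it) {
			dst = append(dst, it)
		}
	}
	return dst
}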
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/security_freebsd.go
similarity index 80%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_freebsd.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/security_freebsd.go
index 746fb3d2c..948acd3f5 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_freebsd.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/security_freebsd.go
@@ -1,13 +1,12 @@
 //go:build !remote
-// +build !remote
 
 package generate
 
 import (
 	"github.com/containers/common/libimage"
 	"github.com/containers/common/pkg/config"
-	"github.com/containers/podman/v4/libpod"
-	"github.com/containers/podman/v4/pkg/specgen"
+	"github.com/containers/podman/v5/libpod"
+	"github.com/containers/podman/v5/pkg/specgen"
 	"github.com/opencontainers/runtime-tools/generate"
 )
 
@@ -19,7 +18,7 @@ func setLabelOpts(s *specgen.SpecGenerator, runtime *libpod.Runtime, pidConfig s
 
 func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, newImage *libimage.Image, rtc *config.Config) error {
 	// If this is a privileged container, change the devfs ruleset to expose all devices.
-	if s.Privileged {
+	if s.IsPrivileged() {
 		for k, m := range g.Config.Mounts {
 			if m.Type == "devfs" {
 				m.Options = []string{
@@ -30,7 +29,9 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
 		}
 	}
 
-	g.SetRootReadonly(s.ReadOnlyFilesystem)
+	if s.ReadOnlyFilesystem != nil {
+		g.SetRootReadonly(*s.ReadOnlyFilesystem)
+	}
 
 	return nil
 }
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_linux.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/security_linux.go
similarity index 89%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_linux.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/security_linux.go
index e266fa509..a04508586 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_linux.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/security_linux.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package generate
 
@@ -11,20 +10,20 @@ import (
 	"github.com/containers/common/pkg/apparmor"
 	"github.com/containers/common/pkg/capabilities"
 	"github.com/containers/common/pkg/config"
-	cutil "github.com/containers/common/pkg/util"
-	"github.com/containers/podman/v4/libpod"
-	"github.com/containers/podman/v4/libpod/define"
-	"github.com/containers/podman/v4/pkg/specgen"
-	"github.com/containers/podman/v4/pkg/util"
+	"github.com/containers/podman/v5/libpod"
+	"github.com/containers/podman/v5/libpod/define"
+	"github.com/containers/podman/v5/pkg/specgen"
+	"github.com/containers/podman/v5/pkg/util"
 	"github.com/opencontainers/runtime-tools/generate"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/exp/slices"
 )
 
 // setLabelOpts sets the label options of the SecurityConfig according to the
 // input.
 func setLabelOpts(s *specgen.SpecGenerator, runtime *libpod.Runtime, pidConfig specgen.Namespace, ipcConfig specgen.Namespace) error {
-	if !runtime.EnableLabeling() || s.Privileged {
+	if !runtime.EnableLabeling() || s.IsPrivileged() {
 		s.SelinuxOpts = label.DisableSecOpt()
 		return nil
 	}
@@ -71,7 +70,7 @@ func setupApparmor(s *specgen.SpecGenerator, rtc *config.Config, g *generate.Gen
 		return nil
 	}
 	// If privileged and caller did not specify apparmor profiles return
-	if s.Privileged && !hasProfile {
+	if s.IsPrivileged() && !hasProfile {
 		return nil
 	}
 	if !hasProfile {
@@ -91,7 +90,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
 	)
 	// HANDLE CAPABILITIES
 	// NOTE: Must happen before SECCOMP
-	if s.Privileged {
+	if s.IsPrivileged() {
 		g.SetupPrivileged(true)
 		caplist, err = capabilities.BoundingSet()
 		if err != nil {
@@ -124,13 +123,13 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
 	// capabilities, required to run the container.
 	var capsRequiredRequested []string
 	for key, val := range s.Labels {
-		if cutil.StringInSlice(key, capabilities.ContainerImageLabels) {
+		if slices.Contains(capabilities.ContainerImageLabels, key) {
 			capsRequiredRequested = strings.Split(val, ",")
 		}
 	}
-	if !s.Privileged && len(capsRequiredRequested) == 1 && capsRequiredRequested[0] == "" {
+	if !s.IsPrivileged() && len(capsRequiredRequested) == 1 && capsRequiredRequested[0] == "" {
 		caplist = []string{}
-	} else if !s.Privileged && len(capsRequiredRequested) > 0 {
+	} else if !s.IsPrivileged() && len(capsRequiredRequested) > 0 {
 		// Pass capRequiredRequested in CapAdd field to normalize capabilities names
 		capsRequired, err := capabilities.MergeCapabilities(nil, capsRequiredRequested, nil)
 		if err != nil {
@@ -138,7 +137,7 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
 		}
 		// Verify all capRequired are in the capList
 		for _, cap := range capsRequired {
-			if !cutil.StringInSlice(cap, caplist) {
+			if !slices.Contains(caplist, cap) {
 				privCapsRequired = append(privCapsRequired, cap)
 			}
 		}
@@ -193,7 +192,9 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
 		}
 	}
 
-	g.SetProcessNoNewPrivileges(s.NoNewPrivileges)
+	if s.NoNewPrivileges != nil {
+		g.SetProcessNoNewPrivileges(*s.NoNewPrivileges)
+	}
 
 	if err := setupApparmor(s, rtc, g); err != nil {
 		return err
@@ -210,11 +211,13 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator,
 
 	// Clear default Seccomp profile from Generator for unconfined containers
 	// and privileged containers which do not specify a seccomp profile.
-	if s.SeccompProfilePath == "unconfined" || (s.Privileged && (s.SeccompProfilePath == "" || s.SeccompProfilePath == config.SeccompOverridePath || s.SeccompProfilePath == config.SeccompDefaultPath)) {
+	if s.SeccompProfilePath == "unconfined" || (s.IsPrivileged() && (s.SeccompProfilePath == "" || s.SeccompProfilePath == config.SeccompOverridePath || s.SeccompProfilePath == config.SeccompDefaultPath)) {
 		configSpec.Linux.Seccomp = nil
 	}
 
-	g.SetRootReadonly(s.ReadOnlyFilesystem)
+	if s.ReadOnlyFilesystem != nil {
+		g.SetRootReadonly(*s.ReadOnlyFilesystem)
+	}
 
 	noUseIPC := s.IpcNS.NSMode == specgen.FromContainer || s.IpcNS.NSMode == specgen.FromPod || s.IpcNS.NSMode == specgen.Host
 	noUseNet := s.NetNS.NSMode == specgen.FromContainer || s.NetNS.NSMode == specgen.FromPod || s.NetNS.NSMode == specgen.Host
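// The hunks above repeat the nil-guard "p != nil && *p" wherever a former
// bool is read. A hypothetical helper (not in the vendored code, which
// inlines the checks) expressing the same default-false read:
func derefOrFalse(p *bool) bool {
	return p != nil && *p
}

// e.g. derefOrFalse(s.NoNewPrivileges) mirrors "s.NoNewPrivileges != nil && *s.NoNewPrivileges".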
"github.com/containers/podman/v5/pkg/util" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -129,7 +128,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru } // If requested, add container init binary - if s.Init { + if s.Init != nil && *s.Init { initPath := s.InitPath if initPath == "" { initPath, err = rtc.FindInitBinary() @@ -181,7 +180,7 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru } } - if s.ReadWriteTmpfs { + if s.ReadWriteTmpfs != nil && *s.ReadWriteTmpfs { runPath, err := imageRunPath(ctx, img) if err != nil { return nil, nil, nil, err @@ -263,9 +262,9 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s for _, volume := range volumesFrom { var options []string - splitVol := strings.SplitN(volume, ":", 2) - if len(splitVol) == 2 { - splitOpts := strings.Split(splitVol[1], ",") + idOrName, volOpts, hasVolOpts := strings.Cut(volume, ":") + if hasVolOpts { + splitOpts := strings.Split(volOpts, ",") setRORW := false setZ := false for _, opt := range splitOpts { @@ -287,9 +286,9 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s options = splitOpts } - ctr, err := runtime.LookupContainer(splitVol[0]) + ctr, err := runtime.LookupContainer(idOrName) if err != nil { - return nil, nil, fmt.Errorf("looking up container %q for volumes-from: %w", splitVol[0], err) + return nil, nil, fmt.Errorf("looking up container %q for volumes-from: %w", idOrName, err) } logrus.Debugf("Adding volumes from container %s", ctr.ID()) diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/storage_freebsd.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_freebsd.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/storage_freebsd.go index e2d211a2a..9c496e3eb 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_freebsd.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/storage_freebsd.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_linux.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/storage_linux.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_linux.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/storage_linux.go index 86f6d51bb..8336dd59b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/storage_linux.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate diff --git a/vendor/github.com/containers/podman/v5/pkg/specgen/generate/validate_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/validate_freebsd.go new file mode 100644 index 000000000..4e6da178a --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/validate_freebsd.go @@ -0,0 +1,8 @@ +package generate + +import "github.com/containers/podman/v5/pkg/specgen" + +// verifyContainerResources does nothing on freebsd as it has no cgroups +func verifyContainerResources(s *specgen.SpecGenerator) ([]string, error) { + return nil, nil +} diff --git 
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/validate.go b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/validate_linux.go
similarity index 98%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/validate.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/generate/validate_linux.go
index 858fcbfc8..25c249613 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/validate.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/generate/validate_linux.go
@@ -1,5 +1,4 @@
 //go:build !remote
-// +build !remote
 
 package generate
 
@@ -12,9 +11,8 @@ import (
 	"github.com/containers/common/pkg/cgroups"
 	"github.com/containers/common/pkg/sysinfo"
-	"github.com/containers/podman/v4/pkg/rootless"
-	"github.com/containers/podman/v4/pkg/specgen"
-	"github.com/containers/podman/v4/utils"
+	"github.com/containers/podman/v5/pkg/rootless"
+	"github.com/containers/podman/v5/pkg/specgen"
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
@@ -179,7 +177,7 @@ func verifyContainerResourcesCgroupV2(s *specgen.SpecGenerator) ([]string, error
 
 	// Memory checks
 	if s.ResourceLimits.Memory != nil && s.ResourceLimits.Memory.Swap != nil {
-		own, err := utils.GetOwnCgroup()
+		own, err := cgroups.GetOwnCgroup()
 		if err != nil {
 			return warnings, err
 		}
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go b/vendor/github.com/containers/podman/v5/pkg/specgen/namespaces.go
similarity index 77%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/namespaces.go
index 94c96794a..4ff329922 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/namespaces.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/namespaces.go
@@ -9,14 +9,14 @@ import (
 	"github.com/containers/common/libnetwork/types"
 	"github.com/containers/common/pkg/cgroups"
-	cutil "github.com/containers/common/pkg/util"
-	"github.com/containers/podman/v4/libpod/define"
-	"github.com/containers/podman/v4/pkg/namespaces"
-	"github.com/containers/podman/v4/pkg/rootless"
-	"github.com/containers/podman/v4/pkg/util"
-	"github.com/containers/storage"
+	"github.com/containers/podman/v5/libpod/define"
+	"github.com/containers/podman/v5/pkg/namespaces"
+	"github.com/containers/podman/v5/pkg/rootless"
+	"github.com/containers/podman/v5/pkg/util"
+	storageTypes "github.com/containers/storage/types"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/runtime-tools/generate"
+	"golang.org/x/exp/slices"
 )
 
 type NamespaceMode string
@@ -230,29 +230,23 @@ func (n *Namespace) validate() error {
 // function.
 func ParseNamespace(ns string) (Namespace, error) {
 	toReturn := Namespace{}
-	switch {
-	case ns == "pod":
+	switch ns {
+	case "pod":
 		toReturn.NSMode = FromPod
-	case ns == "host":
+	case "host":
 		toReturn.NSMode = Host
-	case ns == "private", ns == "":
+	case "private", "":
 		toReturn.NSMode = Private
-	case strings.HasPrefix(ns, "ns:"):
-		split := strings.SplitN(ns, ":", 2)
-		if len(split) != 2 {
-			return toReturn, fmt.Errorf("must provide a path to a namespace when specifying \"ns:\"")
-		}
-		toReturn.NSMode = Path
-		toReturn.Value = split[1]
-	case strings.HasPrefix(ns, "container:"):
-		split := strings.SplitN(ns, ":", 2)
-		if len(split) != 2 {
-			return toReturn, fmt.Errorf("must provide name or ID or a container when specifying \"container:\"")
-		}
-		toReturn.NSMode = FromContainer
-		toReturn.Value = split[1]
 	default:
-		return toReturn, fmt.Errorf("unrecognized namespace mode %s passed", ns)
+		if value, ok := strings.CutPrefix(ns, "ns:"); ok {
+			toReturn.NSMode = Path
+			toReturn.Value = value
+		} else if value, ok := strings.CutPrefix(ns, "container:"); ok {
+			toReturn.NSMode = FromContainer
+			toReturn.Value = value
+		} else {
+			return toReturn, fmt.Errorf("unrecognized namespace mode %s passed", ns)
+		}
 	}
 
 	return toReturn, nil
@@ -302,43 +296,37 @@ func ParseIPCNamespace(ns string) (Namespace, error) {
 // form.
 func ParseUserNamespace(ns string) (Namespace, error) {
 	toReturn := Namespace{}
-	switch {
-	case ns == "auto":
+	switch ns {
+	case "auto":
 		toReturn.NSMode = Auto
 		return toReturn, nil
-	case strings.HasPrefix(ns, "auto:"):
-		split := strings.SplitN(ns, ":", 2)
-		if len(split) != 2 {
-			return toReturn, errors.New("invalid setting for auto: mode")
-		}
-		toReturn.NSMode = Auto
-		toReturn.Value = split[1]
-		return toReturn, nil
-	case ns == "keep-id":
+	case "keep-id":
 		toReturn.NSMode = KeepID
 		return toReturn, nil
-	case strings.HasPrefix(ns, "keep-id:"):
-		split := strings.SplitN(ns, ":", 2)
-		if len(split) != 2 {
-			return toReturn, errors.New("invalid setting for keep-id: mode")
-		}
-		toReturn.NSMode = KeepID
-		toReturn.Value = split[1]
-		return toReturn, nil
-	case ns == "nomap":
+	case "nomap":
 		toReturn.NSMode = NoMap
 		return toReturn, nil
-	case ns == "":
+	case "":
 		toReturn.NSMode = Host
 		return toReturn, nil
+	default:
+		if value, ok := strings.CutPrefix(ns, "auto:"); ok {
+			toReturn.NSMode = Auto
+			toReturn.Value = value
+			return toReturn, nil
+		} else if value, ok := strings.CutPrefix(ns, "keep-id:"); ok {
+			toReturn.NSMode = KeepID
+			toReturn.Value = value
+			return toReturn, nil
+		} else {
+			return ParseNamespace(ns)
+		}
 	}
-
-	return ParseNamespace(ns)
 }
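// The namespace parsing rewrites above lean on strings.CutPrefix (Go 1.20+),
// which folds the old prefix-check-then-SplitN dance into one step. An
// illustrative call:
if value, ok := strings.CutPrefix("ns:/proc/1234/ns/net", "ns:"); ok {
	_ = value // "/proc/1234/ns/net"
}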
 // ParseNetworkFlag parses a network string slice into the network options
 // If the input is nil or empty it will use the default setting from containers.conf
-// TODO (5.0): Drop pastaNetworkNameExists
-func ParseNetworkFlag(networks []string, pastaNetworkNameExists bool) (Namespace, map[string]types.PerNetworkOptions, map[string][]string, error) {
+func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetworkOptions, map[string][]string, error) {
 	var networkOptions map[string][]string
 	// by default we try to use the containers.conf setting
 	// if we get at least one value use this instead
@@ -352,10 +340,10 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork
 	switch {
 	case ns == string(Slirp), strings.HasPrefix(ns, string(Slirp)+":"):
-		parts := strings.SplitN(ns, ":", 2)
-		if len(parts) > 1 {
+		key, options, hasOptions := strings.Cut(ns, ":")
+		if hasOptions {
 			networkOptions = make(map[string][]string)
-			networkOptions[parts[0]] = strings.Split(parts[1], ",")
+			networkOptions[key] = strings.Split(options, ",")
 		}
 		toReturn.NSMode = Slirp
 	case ns == string(FromPod):
@@ -364,11 +352,11 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork
 		toReturn.NSMode = Private
 	case ns == string(Bridge), strings.HasPrefix(ns, string(Bridge)+":"):
 		toReturn.NSMode = Bridge
-		parts := strings.SplitN(ns, ":", 2)
+		_, options, hasOptions := strings.Cut(ns, ":")
 		netOpts := types.PerNetworkOptions{}
-		if len(parts) > 1 {
+		if hasOptions {
 			var err error
-			netOpts, err = parseBridgeNetworkOptions(parts[1])
+			netOpts, err = parseBridgeNetworkOptions(options)
 			if err != nil {
 				return toReturn, nil, nil, err
 			}
@@ -381,53 +369,38 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork
 	case ns == string(Host):
 		toReturn.NSMode = Host
 	case strings.HasPrefix(ns, "ns:"):
-		split := strings.SplitN(ns, ":", 2)
-		if len(split) != 2 {
-			return toReturn, nil, nil, errors.New("must provide a path to a namespace when specifying \"ns:\"")
-		}
+		_, value, _ := strings.Cut(ns, ":")
 		toReturn.NSMode = Path
-		toReturn.Value = split[1]
+		toReturn.Value = value
 	case strings.HasPrefix(ns, string(FromContainer)+":"):
-		split := strings.SplitN(ns, ":", 2)
-		if len(split) != 2 {
-			return toReturn, nil, nil, errors.New("must provide name or ID or a container when specifying \"container:\"")
-		}
+		_, value, _ := strings.Cut(ns, ":")
 		toReturn.NSMode = FromContainer
-		toReturn.Value = split[1]
+		toReturn.Value = value
 	case ns == string(Pasta), strings.HasPrefix(ns, string(Pasta)+":"):
-		var parts []string
-
-		if pastaNetworkNameExists {
-			goto nextCase
-		}
-
-		parts = strings.SplitN(ns, ":", 2)
-		if len(parts) > 1 {
+		key, options, hasOptions := strings.Cut(ns, ":")
+		if hasOptions {
 			networkOptions = make(map[string][]string)
-			networkOptions[parts[0]] = strings.Split(parts[1], ",")
+			networkOptions[key] = strings.Split(options, ",")
 		}
 		toReturn.NSMode = Pasta
-		break
-	nextCase:
-		fallthrough
 	default:
 		// we should have a normal network
-		parts := strings.SplitN(ns, ":", 2)
-		if len(parts) == 1 {
+		name, options, hasOptions := strings.Cut(ns, ":")
+		if hasOptions {
+			if name == "" {
+				return toReturn, nil, nil, errors.New("network name cannot be empty")
+			}
+			netOpts, err := parseBridgeNetworkOptions(options)
+			if err != nil {
+				return toReturn, nil, nil, fmt.Errorf("invalid option for network %s: %w", name, err)
+			}
+			podmanNetworks[name] = netOpts
+		} else {
 			// Assume we have been given a comma separated list of networks for backwards compat.
 			networkList := strings.Split(ns, ",")
 			for _, net := range networkList {
 				podmanNetworks[net] = types.PerNetworkOptions{}
 			}
-		} else {
-			if parts[0] == "" {
-				return toReturn, nil, nil, errors.New("network name cannot be empty")
-			}
-			netOpts, err := parseBridgeNetworkOptions(parts[1])
-			if err != nil {
-				return toReturn, nil, nil, fmt.Errorf("invalid option for network %s: %w", parts[0], err)
-			}
-			podmanNetworks[parts[0]] = netOpts
 		}
 
 		// networks need bridge mode
@@ -440,24 +413,23 @@ func ParseNetworkFlag(networks []string) (Namespace, map[string]types.PerNetwork
 	}
 
 	for _, network := range networks[1:] {
-		parts := strings.SplitN(network, ":", 2)
-		if parts[0] == "" {
+		name, options, hasOptions := strings.Cut(network, ":")
+		if name == "" {
 			return toReturn, nil, nil, fmt.Errorf("network name cannot be empty: %w", define.ErrInvalidArg)
 		}
-		// TODO (5.0): Don't accept string(Pasta) here once we drop pastaNetworkNameExists
-		if cutil.StringInSlice(parts[0], []string{string(Bridge), string(Slirp), string(FromPod), string(NoNetwork),
-			string(Default), string(Private), string(Path), string(FromContainer), string(Host)}) {
-			return toReturn, nil, nil, fmt.Errorf("can only set extra network names, selected mode %s conflicts with bridge: %w", parts[0], define.ErrInvalidArg)
+		if slices.Contains([]string{string(Bridge), string(Slirp), string(Pasta), string(FromPod), string(NoNetwork),
+			string(Default), string(Private), string(Path), string(FromContainer), string(Host)}, name) {
+			return toReturn, nil, nil, fmt.Errorf("can only set extra network names, selected mode %s conflicts with bridge: %w", name, define.ErrInvalidArg)
 		}
 		netOpts := types.PerNetworkOptions{}
-		if len(parts) > 1 {
+		if hasOptions {
 			var err error
-			netOpts, err = parseBridgeNetworkOptions(parts[1])
+			netOpts, err = parseBridgeNetworkOptions(options)
 			if err != nil {
-				return toReturn, nil, nil, fmt.Errorf("invalid option for network %s: %w", parts[0], err)
+				return toReturn, nil, nil, fmt.Errorf("invalid option for network %s: %w", name, err)
 			}
 		}
-		podmanNetworks[parts[0]] = netOpts
+		podmanNetworks[name] = netOpts
 	}
 	}
 
@@ -471,42 +443,42 @@ func parseBridgeNetworkOptions(opts string) (types.PerNetworkOptions, error) {
 	}
 	allopts := strings.Split(opts, ",")
 	for _, opt := range allopts {
-		split := strings.SplitN(opt, "=", 2)
-		switch split[0] {
+		name, value, _ := strings.Cut(opt, "=")
+		switch name {
 		case "ip", "ip6":
-			ip := net.ParseIP(split[1])
+			ip := net.ParseIP(value)
 			if ip == nil {
-				return netOpts, fmt.Errorf("invalid ip address %q", split[1])
+				return netOpts, fmt.Errorf("invalid ip address %q", value)
 			}
 			netOpts.StaticIPs = append(netOpts.StaticIPs, ip)
 		case "mac":
-			mac, err := net.ParseMAC(split[1])
+			mac, err := net.ParseMAC(value)
 			if err != nil {
 				return netOpts, err
 			}
 			netOpts.StaticMAC = types.HardwareAddr(mac)
 		case "alias":
-			if split[1] == "" {
+			if value == "" {
 				return netOpts, errors.New("alias cannot be empty")
 			}
-			netOpts.Aliases = append(netOpts.Aliases, split[1])
+			netOpts.Aliases = append(netOpts.Aliases, value)
 		case "interface_name":
-			if split[1] == "" {
+			if value == "" {
 				return netOpts, errors.New("interface_name cannot be empty")
 			}
-			netOpts.InterfaceName = split[1]
+			netOpts.InterfaceName = value
 		default:
-			return netOpts, fmt.Errorf("unknown bridge network option: %s", split[0])
+			return netOpts, fmt.Errorf("unknown bridge network option: %s", name)
 		}
 	}
 	return netOpts, nil
 }
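// Usage sketch for the option grammar parsed above: a per-network option
// string is a comma-separated list of key=value pairs, now split with
// strings.Cut(opt, "="). An illustrative CLI invocation that exercises it:
//
//	podman run --network mynet:ip=10.88.0.10,mac=aa:bb:cc:dd:ee:ff,interface_name=eth0 ...
//
// Unknown keys still fail with "unknown bridge network option: <name>".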
-func SetupUserNS(idmappings *storage.IDMappingOptions, userns Namespace, g *generate.Generator) (string, error) {
+func SetupUserNS(idmappings *storageTypes.IDMappingOptions, userns Namespace, g *generate.Generator) (string, error) {
 	// User
 	var user string
 	switch userns.NSMode {
@@ -562,7 +534,7 @@ func SetupUserNS(idmappings *storageTypes.IDMappingOptions, userns Namespace, g
 	return user, nil
 }
 
-func privateUserNamespace(idmappings *storage.IDMappingOptions, g *generate.Generator) error {
+func privateUserNamespace(idmappings *storageTypes.IDMappingOptions, g *generate.Generator) error {
 	if err := g.AddOrReplaceLinuxNamespace(string(spec.UserNamespace), ""); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go b/vendor/github.com/containers/podman/v5/pkg/specgen/pod_validate.go
similarity index 98%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/pod_validate.go
index 1c0413cd5..2f1c874ae 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/pod_validate.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/pod_validate.go
@@ -4,7 +4,7 @@ import (
 	"errors"
 	"fmt"
 
-	"github.com/containers/podman/v4/pkg/util"
+	"github.com/containers/podman/v5/pkg/util"
 )
 
 var (
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go b/vendor/github.com/containers/podman/v5/pkg/specgen/podspecgen.go
similarity index 100%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/podspecgen.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/podspecgen.go
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/resources_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/specgen/resources_freebsd.go
similarity index 100%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/resources_freebsd.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/resources_freebsd.go
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/resources_linux.go b/vendor/github.com/containers/podman/v5/pkg/specgen/resources_linux.go
similarity index 100%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/resources_linux.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/resources_linux.go
diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go b/vendor/github.com/containers/podman/v5/pkg/specgen/specgen.go
similarity index 88%
rename from vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go
rename to vendor/github.com/containers/podman/v5/pkg/specgen/specgen.go
index 9568ddc04..00a208921 100644
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go
+++ b/vendor/github.com/containers/podman/v5/pkg/specgen/specgen.go
@@ -8,7 +8,7 @@ import (
 	nettypes "github.com/containers/common/libnetwork/types"
 	"github.com/containers/image/v5/manifest"
-	"github.com/containers/podman/v4/libpod/define"
+	"github.com/containers/podman/v5/libpod/define"
 	"github.com/containers/storage/types"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 )
@@ -52,20 +52,21 @@ type ContainerBasicConfig struct {
 	Command []string `json:"command,omitempty"`
 	// EnvHost indicates that the host environment should be added to container
 	// Optional.
-	EnvHost bool `json:"env_host,omitempty"`
+	EnvHost *bool `json:"env_host,omitempty"`
 	// EnvHTTPProxy indicates that the http host proxy environment variables
 	// should be added to container
 	// Optional.
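// Why the fields below become *bool with omitempty, shown as a hypothetical
// round-trip (imports encoding/json): a nil pointer serializes to nothing,
// so the server default applies, while an explicit false still crosses the wire.
type probe struct {
	EnvHost *bool `json:"env_host,omitempty"`
}

f := false
b1, _ := json.Marshal(probe{})            // {}                  -> unset, use default
b2, _ := json.Marshal(probe{EnvHost: &f}) // {"env_host":false}  -> explicit false
_, _ = b1, b2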
-	HTTPProxy bool `json:"httpproxy,omitempty"`
+	HTTPProxy *bool `json:"httpproxy,omitempty"`
 	// Env is a set of environment variables that will be set in the
 	// container.
 	// Optional.
 	Env map[string]string `json:"env,omitempty"`
 	// Terminal is whether the container will create a PTY.
 	// Optional.
-	Terminal bool `json:"terminal,omitempty"`
+	Terminal *bool `json:"terminal,omitempty"`
 	// Stdin is whether the container will keep its STDIN open.
-	Stdin bool `json:"stdin,omitempty"`
+	// Optional.
+	Stdin *bool `json:"stdin,omitempty"`
 	// Labels are key-value pairs that are used to add metadata to
 	// containers.
 	// Optional.
@@ -90,6 +91,7 @@ type ContainerBasicConfig struct {
 	// Timeout is a maximum time in seconds the container will run before
 	// main process is sent SIGKILL.
 	// If 0 is used, signal will not be sent. Container can run indefinitely
+	// if it does not stop after the default termination signal.
 	// Optional.
 	Timeout uint `json:"timeout,omitempty"`
 	// LogConfiguration describes the logging for a container including
@@ -101,15 +103,6 @@ type ContainerBasicConfig struct {
 	// If not given, a default location will be used.
 	// Optional.
 	ConmonPidFile string `json:"conmon_pid_file,omitempty"`
-	// RawImageName is the user-specified and unprocessed input referring
-	// to a local or a remote image.
-	RawImageName string `json:"raw_image_name,omitempty"`
-	// ImageOS is the user-specified image OS
-	ImageOS string `json:"image_os,omitempty"`
-	// ImageArch is the user-specified image architecture
-	ImageArch string `json:"image_arch,omitempty"`
-	// ImageVariant is the user-specified image variant
-	ImageVariant string `json:"image_variant,omitempty"`
 	// RestartPolicy is the container's restart policy - an action which
 	// will be taken when the container exits.
 	// If not given, the default policy, which does nothing, will be used.
@@ -140,10 +133,8 @@ type ContainerBasicConfig struct {
 	// "container" - let the OCI runtime deal with it, advertise conmon's MAINPID
 	// "conmon-only" - advertise conmon's MAINPID, send READY when started, don't pass to OCI
 	// "ignore" - unset NOTIFY_SOCKET
-	SdNotifyMode string `json:"sdnotifyMode,omitempty"`
-	// Namespace is the libpod namespace the container will be placed in.
 	// Optional.
-	Namespace string `json:"namespace,omitempty"`
+	SdNotifyMode string `json:"sdnotifyMode,omitempty"`
 	// PidNS is the container's PID namespace.
 	// It defaults to private.
 	// Mandatory.
@@ -165,8 +156,9 @@ type ContainerBasicConfig struct {
 	// Sysctl sets kernel parameters for the container
 	Sysctl map[string]string `json:"sysctl,omitempty"`
 	// Remove indicates if the container should be removed once it has been started
-	// and exits
-	Remove bool `json:"remove,omitempty"`
+	// and exits.
+	// Optional.
+	Remove *bool `json:"remove,omitempty"`
 	// ContainerCreateCommand is the command that was used to create this
 	// container.
 	// This will be shown in the output of Inspect() on the container, and
@@ -180,6 +172,11 @@ type ContainerBasicConfig struct {
 	// set tags as `json:"-"` for not supported remote
 	// Optional.
 	PreserveFDs uint `json:"-"`
+	// PreserveFD is a list of additional file descriptors (in addition
+	// to 0, 1, 2) that will be passed to the executed process.
+	// set tags as `json:"-"` for not supported remote
+	// Optional.
+	PreserveFD []uint `json:"-"`
 	// Timezone is the timezone inside the container.
 	// Local means it has the same timezone as the host machine
 	// Optional.
@@ -189,23 +186,26 @@ type ContainerBasicConfig struct {
 	// container. Dependencies can be specified by name or full/partial ID.
 	// Optional.
 	DependencyContainers []string `json:"dependencyContainers,omitempty"`
-	// PidFile is the file that saves container process id.
-	// set tags as `json:"-"` for not supported remote
+	// PidFile is the file that saves container's PID.
+	// Not supported for remote clients, so not serialized in specgen JSON.
 	// Optional.
 	PidFile string `json:"-"`
 	// EnvSecrets are secrets that will be set as environment variables
 	// Optional.
 	EnvSecrets map[string]string `json:"secret_env,omitempty"`
 	// InitContainerType describes if this container is an init container
-	// and if so, what type: always or once
+	// and if so, what type: always or once.
+	// Optional.
 	InitContainerType string `json:"init_container_type"`
 	// Personality allows users to configure different execution domains.
 	// Execution domains tell Linux how to map signal numbers into signal actions.
 	// The execution domain system allows Linux to provide limited support
 	// for binaries compiled under other UNIX-like operating systems.
+	// Optional.
 	Personality *spec.LinuxPersonality `json:"personality,omitempty"`
 	// EnvMerge takes the specified environment variables from image and preprocess them before injecting them into the
 	// container.
+	// Optional.
 	EnvMerge []string `json:"envmerge,omitempty"`
 	// UnsetEnv unsets the specified default environment variables from the image or from buildin or containers.conf
 	// Optional.
@@ -213,12 +213,14 @@ type ContainerBasicConfig struct {
 	// UnsetEnvAll unsetall default environment variables from the image or from buildin or containers.conf
 	// UnsetEnvAll unsets all default environment variables from the image or from buildin
 	// Optional.
-	UnsetEnvAll bool `json:"unsetenvall,omitempty"`
+	UnsetEnvAll *bool `json:"unsetenvall,omitempty"`
 	// Passwd is a container run option that determines if we are validating users/groups before running the container
 	Passwd *bool `json:"manage_password,omitempty"`
-	// PasswdEntry specifies arbitrary data to append to a file.
+	// PasswdEntry specifies an arbitrary string to append to the container's /etc/passwd file.
+	// Optional.
 	PasswdEntry string `json:"passwd_entry,omitempty"`
-	// GroupEntry specifies arbitrary data to append to a file.
+	// GroupEntry specifies an arbitrary string to append to the container's /etc/group file.
+	// Optional.
 	GroupEntry string `json:"group_entry,omitempty"`
 }
 
@@ -231,15 +233,33 @@ type ContainerStorageConfig struct {
 	// Conflicts with Rootfs.
 	// At least one of Image or Rootfs must be specified.
 	Image string `json:"image"`
+	// RawImageName is the user-specified and unprocessed input referring
+	// to a local or a remote image.
+	// Optional, but strongly encouraged to be set if Image is set.
+	RawImageName string `json:"raw_image_name,omitempty"`
+	// ImageOS is the user-specified OS of the image.
+	// Used to select a different variant from a manifest list.
+	// Optional.
+	ImageOS string `json:"image_os,omitempty"`
+	// ImageArch is the user-specified image architecture.
+	// Used to select a different variant from a manifest list.
+	// Optional.
+	ImageArch string `json:"image_arch,omitempty"`
+	// ImageVariant is the user-specified image variant.
+	// Used to select a different variant from a manifest list.
+	// Optional.
+	ImageVariant string `json:"image_variant,omitempty"`
 	// Rootfs is the path to a directory that will be used as the
 	// container's root filesystem. No modification will be made to the
 	// directory, it will be directly mounted into the container as root.
 	// Conflicts with Image.
 	// At least one of Image or Rootfs must be specified.
 	Rootfs string `json:"rootfs,omitempty"`
-	// RootfsOverlay tells if rootfs is actually an overlay on top of base path
-	RootfsOverlay bool `json:"rootfs_overlay,omitempty"`
-	// RootfsMapping specifies if there are mappings to apply to the rootfs.
+	// RootfsOverlay tells if rootfs is actually an overlay on top of base path.
+	// Optional.
+	RootfsOverlay *bool `json:"rootfs_overlay,omitempty"`
+	// RootfsMapping specifies if there are UID/GID mappings to apply to the rootfs.
+	// Optional.
 	RootfsMapping *string `json:"rootfs_mapping,omitempty"`
 	// ImageVolumeMode indicates how image volumes will be created.
 	// Supported modes are "ignore" (do not create), "tmpfs" (create as
@@ -252,10 +272,12 @@ type ContainerStorageConfig struct {
 	// may optionally be followed by a : and then one or more
 	// comma-separated options. Valid options are 'ro', 'rw', and 'z'.
 	// Options will be used for all volumes sourced from the container.
+	// Optional.
 	VolumesFrom []string `json:"volumes_from,omitempty"`
 	// Init specifies that an init binary will be mounted into the
 	// container, and will be used as PID1.
-	Init bool `json:"init,omitempty"`
+	// Optional.
+	Init *bool `json:"init,omitempty"`
 	// InitPath specifies the path to the init binary that will be added if
 	// Init is specified above. If not specified, the default set in the
 	// Libpod config will be used. Ignored if Init above is not set.
@@ -283,7 +305,8 @@ type ContainerStorageConfig struct {
 	// DeviceCgroupRule are device cgroup rules that allow containers
 	// to use additional types of devices.
 	DeviceCgroupRule []spec.LinuxDeviceCgroup `json:"device_cgroup_rule,omitempty"`
-	// DevicesFrom is a way to ensure your container inherits device specific information from another container
+	// DevicesFrom specifies that this container will mount the device(s) from other container(s).
+	// Optional.
 	DevicesFrom []string `json:"devices_from,omitempty"`
 	// HostDeviceList is used to recreate the mounted device on inherited containers
 	HostDeviceList []spec.LinuxDevice `json:"host_device_list,omitempty"`
@@ -307,7 +330,7 @@ type ContainerStorageConfig struct {
 	// Create the working directory if it doesn't exist.
 	// If unset, it doesn't create it.
 	// Optional.
-	CreateWorkingDir bool `json:"create_working_dir,omitempty"`
+	CreateWorkingDir *bool `json:"create_working_dir,omitempty"`
 	// StorageOpts is the container's storage options
 	// Optional.
 	StorageOpts map[string]string `json:"storage_opts,omitempty"`
@@ -320,10 +343,12 @@ type ContainerStorageConfig struct {
 	Secrets []Secret `json:"secrets,omitempty"`
 	// Volatile specifies whether the container storage can be optimized
 	// at the cost of not syncing all the dirty files in memory.
-	Volatile bool `json:"volatile,omitempty"`
+	// Optional.
+	Volatile *bool `json:"volatile,omitempty"`
 	// ChrootDirs is an additional set of directories that need to be
 	// treated as root directories. Standard bind mounts will be mounted
 	// into paths relative to these directories.
+	// Optional.
 	ChrootDirs []string `json:"chroot_directories,omitempty"`
 }
 
@@ -338,7 +363,8 @@ type ContainerSecurityConfig struct {
 	// (Though SELinux can be manually re-enabled).
 	// TODO: this conflicts with things.
 	// TODO: this does more.
-	Privileged bool `json:"privileged,omitempty"`
+	// Optional.
+	Privileged *bool `json:"privileged,omitempty"`
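// Call sites earlier in this patch use s.IsPrivileged() rather than reading
// the field, so the now-optional Privileged pairs with an accessor. A
// plausible shape, hedged because the accessor body is not part of this diff:
func (s *SpecGenerator) IsPrivileged() bool {
	return s.Privileged != nil && *s.Privileged
}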
+ Privileged *bool `json:"privileged,omitempty"` // User is the user the container will be run as. // Can be given as a UID or a username; if a username, it will be // resolved within the container, using the container's /etc/passwd. @@ -377,7 +403,8 @@ type ContainerSecurityConfig struct { // NoNewPrivileges is whether the container will set the no new // privileges flag on create, which disables gaining additional // privileges (e.g. via setuid) in the container. - NoNewPrivileges bool `json:"no_new_privileges,omitempty"` + // Optional. + NoNewPrivileges *bool `json:"no_new_privileges,omitempty"` // UserNS is the container's user namespace. // It defaults to host, indicating that no user namespace will be // created. @@ -389,15 +416,18 @@ type ContainerSecurityConfig struct { // Required if UserNS is private. IDMappings *types.IDMappingOptions `json:"idmappings,omitempty"` // ReadOnlyFilesystem indicates that everything will be mounted - // as read-only - ReadOnlyFilesystem bool `json:"read_only_filesystem,omitempty"` + // as read-only. + // Optional. + ReadOnlyFilesystem *bool `json:"read_only_filesystem,omitempty"` // ReadWriteTmpfs indicates that when running with a ReadOnlyFilesystem - // mount temporary file systems - ReadWriteTmpfs bool `json:"read_write_tmpfs,omitempty"` + // mount temporary file systems. + // Optional. + ReadWriteTmpfs *bool `json:"read_write_tmpfs,omitempty"` // LabelNested indicates whether or not the container is allowed to - // run fully nested containers including labelling - LabelNested bool `json:"label_nested,omitempty"` + // run fully nested containers including SELinux labelling. + // Optional. + LabelNested *bool `json:"label_nested,omitempty"` // Umask is the umask the init process of the container will be run with. Umask string `json:"umask,omitempty"` @@ -407,8 +437,10 @@ type ContainerSecurityConfig struct { // given in addition to the default list. // Optional Mask []string `json:"mask,omitempty"` - // Unmask is the path we want to unmask in the container. To override - // all the default paths that are masked, set unmask=ALL. + // Unmask a path in the container. Some paths are masked by default, + // preventing them from being accessed within the container; this undoes + // that masking. If ALL is passed, all paths will be unmasked. + // Optional. Unmask []string `json:"unmask,omitempty"` } @@ -419,8 +451,9 @@ type ContainerCgroupConfig struct { // It defaults to private. // Mandatory. CgroupNS Namespace `json:"cgroupns,omitempty"` - // CgroupsMode sets a policy for how cgroups will be created in the + // CgroupsMode sets a policy for how cgroups will be created for the // container, including the ability to disable creation entirely. + // Optional. CgroupsMode string `json:"cgroups_mode,omitempty"` // CgroupParent is the container's Cgroup parent. // If not set, the default for the current cgroup driver will be used. @@ -444,7 +477,8 @@ type ContainerNetworkConfig struct { // This is based on ports set in Expose below, and any ports specified // by the Image (if one is given). // Only available if NetNS is set to Bridge or Slirp. - PublishExposedPorts bool `json:"publish_image_ports,omitempty"` + // Optional. + PublishExposedPorts *bool `json:"publish_image_ports,omitempty"` // Expose is a number of ports that will be forwarded to the container // if PublishExposedPorts is set. 
// Expose is a map of uint16 (port number) to a string representing @@ -461,6 +495,7 @@ type ContainerNetworkConfig struct { // network interface name for this container on the specific network. // If the map is empty and the bridge network mode is set the container // will be joined to the default network. + // Optional. Networks map[string]nettypes.PerNetworkOptions // CNINetworks is a list of CNI networks to join the container to. // If this list is empty, the default CNI network will be joined @@ -473,7 +508,8 @@ type ContainerNetworkConfig struct { // UseImageResolvConf indicates that resolv.conf should not be managed // by Podman, but instead sourced from the image. // Conflicts with DNSServer, DNSSearch, DNSOption. - UseImageResolvConf bool `json:"use_image_resolve_conf,omitempty"` + // Optional. + UseImageResolvConf *bool `json:"use_image_resolve_conf,omitempty"` // DNSServers is a set of DNS servers that will be used in the // container's resolv.conf, replacing the host's DNS Servers which are // used by default. @@ -495,13 +531,15 @@ type ContainerNetworkConfig struct { // UseImageHosts indicates that /etc/hosts should not be managed by // Podman, and instead sourced from the image. // Conflicts with HostAdd. - // Do not set omitempty here, if this is false it should be set to not get - // the server default. - // Ideally this would be a pointer so we could differentiate between an - // explicitly false/true and unset (containers.conf default). However - // specgen is stable so we can not change this right now. - // TODO (5.0): change to pointer - UseImageHosts bool `json:"use_image_hosts"` + // Optional. + UseImageHosts *bool `json:"use_image_hosts,omitempty"` + // BaseHostsFile is the path to a hosts file, the entries from this file + // are added to the containers hosts file. As special value "image" is + // allowed which uses the /etc/hosts file from within the image and "none" + // which uses no base file at all. If it is empty we should default + // to the base_hosts_file configuration in containers.conf. + // Optional. + BaseHostsFile string `json:"base_hosts_file,omitempty"` // HostAdd is a set of hosts which will be added to the container's // /etc/hosts file. // Conflicts with UseImageHosts. @@ -544,10 +582,6 @@ type ContainerResourceConfig struct { // that are used to configure cgroup v2. // Optional. 
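[Editor's note on the new BaseHostsFile field just above: it takes a host path plus the special values "image" and "none", and an empty value defers to base_hosts_file in containers.conf. A hedged illustration of that documented contract; resolveBaseHosts is hypothetical, not podman code.]

package main

import "fmt"

// resolveBaseHosts spells out the documented meaning of each BaseHostsFile value.
func resolveBaseHosts(v string) string {
	switch v {
	case "":
		return "fall back to base_hosts_file from containers.conf"
	case "image":
		return "seed /etc/hosts from the file inside the image"
	case "none":
		return "start from an empty hosts file"
	default:
		return "copy entries from host file " + v
	}
}

func main() {
	for _, v := range []string{"", "image", "none", "/etc/hosts"} {
		fmt.Printf("%-12q -> %s\n", v, resolveBaseHosts(v))
	}
}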
CgroupConf map[string]string `json:"unified,omitempty"` - // CPU period of the cpuset, determined by --cpus - CPUPeriod uint64 `json:"cpu_period,omitempty"` - // CPU quota of the cpuset, determined by --cpus - CPUQuota int64 `json:"cpu_quota,omitempty"` } // ContainerHealthCheckConfig describes a container healthcheck with attributes @@ -578,6 +612,13 @@ type SpecGenerator struct { cacheLibImage } +func (s *SpecGenerator) IsPrivileged() bool { + if s.Privileged != nil { + return *s.Privileged + } + return false +} + func (s *SpecGenerator) IsInitContainer() bool { return len(s.InitContainerType) != 0 } @@ -611,7 +652,8 @@ func NewSpecGenerator(arg string, rootfs bool) *SpecGenerator { if lastColonIndex != -1 { lastPart := csc.Rootfs[lastColonIndex+1:] if lastPart == "O" { - csc.RootfsOverlay = true + localTrue := true + csc.RootfsOverlay = &localTrue csc.Rootfs = csc.Rootfs[:lastColonIndex] } else if lastPart == "idmap" || strings.HasPrefix(lastPart, "idmap=") { csc.RootfsMapping = &lastPart diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen_local.go b/vendor/github.com/containers/podman/v5/pkg/specgen/specgen_local.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/specgen/specgen_local.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/specgen_local.go index 9e84249c4..3ed3ec043 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen_local.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/specgen_local.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package specgen diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen_remote.go b/vendor/github.com/containers/podman/v5/pkg/specgen/specgen_remote.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/specgen/specgen_remote.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/specgen_remote.go index 806aed73b..7bb83da29 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen_remote.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/specgen_remote.go @@ -1,5 +1,4 @@ //go:build remote -// +build remote package specgen diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/utils.go b/vendor/github.com/containers/podman/v5/pkg/specgen/utils.go similarity index 95% rename from vendor/github.com/containers/podman/v4/pkg/specgen/utils.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/utils.go index dc9127bb3..dc53d023b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/utils.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/utils.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package specgen diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/utils_linux.go b/vendor/github.com/containers/podman/v5/pkg/specgen/utils_linux.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/specgen/utils_linux.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/utils_linux.go index d8e4cbae3..075f81c4f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/utils_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/utils_linux.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package specgen diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go b/vendor/github.com/containers/podman/v5/pkg/specgen/volumes.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go rename to 
vendor/github.com/containers/podman/v5/pkg/specgen/volumes.go index b8b2ece8b..075711138 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/volumes.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/volumes.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/containers/common/pkg/parse" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go b/vendor/github.com/containers/podman/v5/pkg/specgen/winpath.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/specgen/winpath.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/winpath.go diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_linux.go b/vendor/github.com/containers/podman/v5/pkg/specgen/winpath_linux.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/specgen/winpath_linux.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/winpath_linux.go index f42ac7639..16c62492d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/winpath_linux.go @@ -7,7 +7,7 @@ import ( ) func shouldResolveWinPaths() bool { - return machine.MachineHostType() == "wsl" + return machine.HostType() == "wsl" } func shouldResolveUnixWinVariant(path string) bool { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_unsupported.go b/vendor/github.com/containers/podman/v5/pkg/specgen/winpath_unsupported.go similarity index 91% rename from vendor/github.com/containers/podman/v4/pkg/specgen/winpath_unsupported.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/winpath_unsupported.go index 4cd008fdd..ff40b55a4 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_unsupported.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/winpath_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !windows -// +build !linux,!windows package specgen diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_windows.go b/vendor/github.com/containers/podman/v5/pkg/specgen/winpath_windows.go similarity index 99% rename from vendor/github.com/containers/podman/v4/pkg/specgen/winpath_windows.go rename to vendor/github.com/containers/podman/v5/pkg/specgen/winpath_windows.go index c6aad314a..f85d40dd5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/winpath_windows.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgen/winpath_windows.go @@ -1,9 +1,10 @@ package specgen import ( - "github.com/sirupsen/logrus" "os" "path/filepath" + + "github.com/sirupsen/logrus" ) func shouldResolveUnixWinVariant(path string) bool { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/createparse.go b/vendor/github.com/containers/podman/v5/pkg/specgenutil/createparse.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/specgenutil/createparse.go rename to vendor/github.com/containers/podman/v5/pkg/specgenutil/createparse.go index 373fb6faa..04bd8b0aa 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgenutil/createparse.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgenutil/createparse.go @@ -4,7 +4,7 @@ import ( "errors" "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/pkg/domain/entities" + 
"github.com/containers/podman/v5/pkg/domain/entities" ) // validate determines if the flags and values given by the user are valid. things checked diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/ports.go b/vendor/github.com/containers/podman/v5/pkg/specgenutil/ports.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/specgenutil/ports.go rename to vendor/github.com/containers/podman/v5/pkg/specgenutil/ports.go diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/specgen.go b/vendor/github.com/containers/podman/v5/pkg/specgenutil/specgen.go similarity index 84% rename from vendor/github.com/containers/podman/v4/pkg/specgenutil/specgen.go rename to vendor/github.com/containers/podman/v5/pkg/specgenutil/specgen.go index c15c56031..912193b45 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgenutil/specgen.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgenutil/specgen.go @@ -11,14 +11,14 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/image/v5/manifest" - "github.com/containers/podman/v4/cmd/podman/parse" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - envLib "github.com/containers/podman/v4/pkg/env" - "github.com/containers/podman/v4/pkg/namespaces" - "github.com/containers/podman/v4/pkg/specgen" - systemdDefine "github.com/containers/podman/v4/pkg/systemd/define" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/cmd/podman/parse" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + envLib "github.com/containers/podman/v5/pkg/env" + "github.com/containers/podman/v5/pkg/namespaces" + "github.com/containers/podman/v5/pkg/specgen" + systemdDefine "github.com/containers/podman/v5/pkg/systemd/define" + "github.com/containers/podman/v5/pkg/util" "github.com/docker/go-units" "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" @@ -226,6 +226,7 @@ func setNamespaces(rtc *config.Config, s *specgen.SpecGenerator, c *entities.Con if ns, ok := os.LookupEnv("PODMAN_USERNS"); ok { userns = ns } else { + // TODO: This should be moved into pkg/specgen/generate so we don't use the client's containers.conf userns = rtc.Containers.UserNS } } @@ -326,6 +327,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions return err } + // TODO: This needs to move into pkg/specgen/generate so we aren't using containers.conf on the client. if rtc.Containers.EnableLabeledUsers { defSecurityOpts, err := currentLabelOpts() if err != nil { @@ -334,6 +336,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions c.SecurityOpt = append(defSecurityOpts, c.SecurityOpt...) 
} + // validate flags as needed if err := validate(c); err != nil { return err @@ -389,8 +392,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions return err } - if !s.Terminal { - s.Terminal = c.TTY + if s.Terminal == nil { + s.Terminal = &c.TTY } if err := verifyExpose(c.Expose); err != nil { @@ -401,8 +404,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if c.Net != nil { s.PortMappings = c.Net.PublishPorts } - if !s.PublishExposedPorts { - s.PublishExposedPorts = c.PublishAll + if s.PublishExposedPorts == nil { + s.PublishExposedPorts = &c.PublishAll } if len(s.Pod) == 0 || len(c.Pod) > 0 { @@ -450,12 +453,12 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions // any case. osEnv := envLib.Map(os.Environ()) - if !s.EnvHost { - s.EnvHost = c.EnvHost + if s.EnvHost == nil { + s.EnvHost = &c.EnvHost } - if !s.HTTPProxy { - s.HTTPProxy = c.HTTPProxy + if s.HTTPProxy == nil { + s.HTTPProxy = &c.HTTPProxy } // env-file overrides any previous variables @@ -502,11 +505,11 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions // Last, add user annotations for _, annotation := range c.Annotation { - splitAnnotation := strings.SplitN(annotation, "=", 2) - if len(splitAnnotation) < 2 { + key, val, hasVal := strings.Cut(annotation, "=") + if !hasVal { return errors.New("annotations must be formatted KEY=VALUE") } - annotations[splitAnnotation[0]] = splitAnnotation[1] + annotations[key] = val } if len(s.Annotations) == 0 { s.Annotations = annotations @@ -515,11 +518,11 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if len(c.StorageOpts) > 0 { opts := make(map[string]string, len(c.StorageOpts)) for _, opt := range c.StorageOpts { - split := strings.SplitN(opt, "=", 2) - if len(split) != 2 { + key, val, hasVal := strings.Cut(opt, "=") + if !hasVal { return errors.New("storage-opt must be formatted KEY=VALUE") } - opts[split[0]] = split[1] + opts[key] = val } s.StorageOpts = opts } @@ -572,12 +575,12 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if c.Net != nil { s.HostAdd = c.Net.AddHosts - s.UseImageResolvConf = c.Net.UseImageResolvConf + s.UseImageResolvConf = &c.Net.UseImageResolvConf s.DNSServers = c.Net.DNSServers s.DNSSearch = c.Net.DNSSearch s.DNSOptions = c.Net.DNSOptions s.NetworkOptions = c.Net.NetworkOptions - s.UseImageHosts = c.Net.NoHosts + s.UseImageHosts = &c.Net.NoHosts } if len(s.HostUsers) == 0 || len(c.HostUsers) != 0 { s.HostUsers = c.HostUsers @@ -587,9 +590,6 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.ImageVolumeMode = c.ImageVolume } } - if len(s.ImageVolumeMode) == 0 { - s.ImageVolumeMode = rtc.Engine.ImageVolumeMode - } if s.ImageVolumeMode == define.TypeBind { s.ImageVolumeMode = "anonymous" } @@ -622,9 +622,6 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if len(s.CgroupsMode) == 0 { s.CgroupsMode = c.CgroupsMode } - if s.CgroupsMode == "" { - s.CgroupsMode = rtc.Cgroups() - } if len(s.Groups) == 0 || len(c.GroupAdd) != 0 { s.Groups = c.GroupAdd @@ -650,11 +647,11 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if len(s.CapDrop) == 0 || len(c.CapDrop) != 0 { s.CapDrop = c.CapDrop } - if !s.Privileged { - s.Privileged = c.Privileged + if s.Privileged == nil { + s.Privileged = &c.Privileged } - if !s.ReadOnlyFilesystem { - s.ReadOnlyFilesystem = c.ReadOnly + 
if s.ReadOnlyFilesystem == nil { + s.ReadOnlyFilesystem = &c.ReadOnly } if len(s.ConmonPidFile) == 0 || len(c.ConmonPIDFile) != 0 { s.ConmonPidFile = c.ConmonPIDFile @@ -667,17 +664,18 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions // Only add ReadWrite tmpfs mounts iff the container is // being run ReadOnly and ReadWriteTmpFS is not disabled, // (user specifying --read-only-tmpfs=false.) - s.ReadWriteTmpfs = c.ReadOnly && c.ReadWriteTmpFS + localRWTmpfs := c.ReadOnly && c.ReadWriteTmpFS + s.ReadWriteTmpfs = &localRWTmpfs // TODO convert to map? // check if key=value and convert sysmap := make(map[string]string) for _, ctl := range c.Sysctl { - splitCtl := strings.SplitN(ctl, "=", 2) - if len(splitCtl) < 2 { + key, val, hasVal := strings.Cut(ctl, "=") + if !hasVal { return fmt.Errorf("invalid sysctl value %q", ctl) } - sysmap[splitCtl[0]] = splitCtl[1] + sysmap[key] = val } if len(s.Sysctl) == 0 || len(c.Sysctl) != 0 { s.Sysctl = sysmap @@ -690,53 +688,57 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions for _, opt := range c.SecurityOpt { // Docker deprecated the ":" syntax but still supports it, // so we need to as well - var con []string + var key, val string + var hasVal bool if strings.Contains(opt, "=") { - con = strings.SplitN(opt, "=", 2) + key, val, hasVal = strings.Cut(opt, "=") } else { - con = strings.SplitN(opt, ":", 2) + key, val, hasVal = strings.Cut(opt, ":") } - if len(con) != 2 && - con[0] != "no-new-privileges" { + if !hasVal && + key != "no-new-privileges" { return fmt.Errorf("invalid --security-opt 1: %q", opt) } - switch con[0] { + switch key { case "apparmor": - s.ContainerSecurityConfig.ApparmorProfile = con[1] - s.Annotations[define.InspectAnnotationApparmor] = con[1] + s.ContainerSecurityConfig.ApparmorProfile = val + s.Annotations[define.InspectAnnotationApparmor] = val case "label": - if con[1] == "nested" { - s.ContainerSecurityConfig.LabelNested = true + if val == "nested" { + localTrue := true + s.ContainerSecurityConfig.LabelNested = &localTrue continue } // TODO selinux opts and label opts are the same thing - s.ContainerSecurityConfig.SelinuxOpts = append(s.ContainerSecurityConfig.SelinuxOpts, con[1]) + s.ContainerSecurityConfig.SelinuxOpts = append(s.ContainerSecurityConfig.SelinuxOpts, val) s.Annotations[define.InspectAnnotationLabel] = strings.Join(s.ContainerSecurityConfig.SelinuxOpts, ",label=") case "mask": - s.ContainerSecurityConfig.Mask = append(s.ContainerSecurityConfig.Mask, strings.Split(con[1], ":")...) + s.ContainerSecurityConfig.Mask = append(s.ContainerSecurityConfig.Mask, strings.Split(val, ":")...) case "proc-opts": - s.ProcOpts = strings.Split(con[1], ",") + s.ProcOpts = strings.Split(val, ",") case "seccomp": - s.SeccompProfilePath = con[1] - s.Annotations[define.InspectAnnotationSeccomp] = con[1] + s.SeccompProfilePath = val + s.Annotations[define.InspectAnnotationSeccomp] = val // this option is for docker compatibility, it is the same as unmask=ALL case "systempaths": - if con[1] == "unconfined" { + if val == "unconfined" { s.ContainerSecurityConfig.Unmask = append(s.ContainerSecurityConfig.Unmask, []string{"ALL"}...) } else { - return fmt.Errorf("invalid systempaths option %q, only `unconfined` is supported", con[1]) + return fmt.Errorf("invalid systempaths option %q, only `unconfined` is supported", val) } case "unmask": - s.ContainerSecurityConfig.Unmask = append(s.ContainerSecurityConfig.Unmask, con[1:]...) 
+ if hasVal { + s.ContainerSecurityConfig.Unmask = append(s.ContainerSecurityConfig.Unmask, val) + } case "no-new-privileges": noNewPrivileges := true - if len(con) == 2 { - noNewPrivileges, err = strconv.ParseBool(con[1]) + if hasVal { + noNewPrivileges, err = strconv.ParseBool(val) if err != nil { return fmt.Errorf("invalid --security-opt 2: %q", opt) } } - s.ContainerSecurityConfig.NoNewPrivileges = noNewPrivileges + s.ContainerSecurityConfig.NoNewPrivileges = &noNewPrivileges default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } @@ -763,7 +765,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.Volumes = volumes } - if s.ContainerSecurityConfig.LabelNested { + if s.LabelNested != nil && *s.LabelNested { // Need to unmask the SELinux file system s.Unmask = append(s.Unmask, "/sys/fs/selinux", "/proc") s.Mounts = append(s.Mounts, specs.Mount{ @@ -781,7 +783,12 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.ImageVolumes = imageVolumes } - for _, dev := range c.Devices { + devices := c.Devices + for _, gpu := range c.GPUs { + devices = append(devices, "nvidia.com/gpu="+gpu) + } + + for _, dev := range devices { s.Devices = append(s.Devices, specs.LinuxDevice{Path: dev}) } @@ -793,14 +800,14 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.DeviceCgroupRule = append(s.DeviceCgroupRule, dev) } - if !s.Init { - s.Init = c.Init + if s.Init == nil { + s.Init = &c.Init } if len(s.InitPath) == 0 || len(c.InitPath) != 0 { s.InitPath = c.InitPath } - if !s.Stdin { - s.Stdin = c.Interactive + if s.Stdin == nil { + s.Stdin = &c.Interactive } // quiet // DeviceCgroupRules: c.StringSlice("device-cgroup-rule"), @@ -813,23 +820,23 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions logOpts := make(map[string]string) for _, o := range c.LogOptions { - split := strings.SplitN(o, "=", 2) - if len(split) < 2 { + key, val, hasVal := strings.Cut(o, "=") + if !hasVal { return fmt.Errorf("invalid log option %q", o) } - switch strings.ToLower(split[0]) { + switch strings.ToLower(key) { case "driver": - s.LogConfiguration.Driver = split[1] + s.LogConfiguration.Driver = val case "path": - s.LogConfiguration.Path = split[1] + s.LogConfiguration.Path = val case "max-size": - logSize, err := units.FromHumanSize(split[1]) + logSize, err := units.FromHumanSize(val) if err != nil { return err } s.LogConfiguration.Size = logSize default: - logOpts[split[0]] = split[1] + logOpts[key] = val } } if len(s.LogConfiguration.Options) == 0 || len(c.LogOptions) != 0 { @@ -838,9 +845,17 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if len(s.Name) == 0 || len(c.Name) != 0 { s.Name = c.Name } + + if c.PreserveFDs != 0 && c.PreserveFD != nil { + return errors.New("cannot specify both --preserve-fds and --preserve-fd") + } + if s.PreserveFDs == 0 || c.PreserveFDs != 0 { s.PreserveFDs = c.PreserveFDs } + if s.PreserveFD == nil || c.PreserveFD != nil { + s.PreserveFD = c.PreserveFD + } if s.OOMScoreAdj == nil || c.OOMScoreAdj != nil { s.OOMScoreAdj = c.OOMScoreAdj @@ -866,8 +881,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.Personality.Domain = specs.LinuxPersonalityDomain(c.Personality) } - if !s.Remove { - s.Remove = c.Rm + if s.Remove == nil { + s.Remove = &c.Rm } if s.StopTimeout == nil || c.StopTimeout != 0 { s.StopTimeout = &c.StopTimeout @@ -884,8 +899,8 @@ func FillOutSpecGen(s 
*specgen.SpecGenerator, c *entities.ContainerCreateOptions if len(s.PidFile) == 0 || len(c.PidFile) != 0 { s.PidFile = c.PidFile } - if !s.Volatile { - s.Volatile = c.Rm + if s.Volatile == nil { + s.Volatile = &c.Rm } if len(s.EnvMerge) == 0 || len(c.EnvMerge) != 0 { s.EnvMerge = c.EnvMerge @@ -893,8 +908,8 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions if len(s.UnsetEnv) == 0 || len(c.UnsetEnv) != 0 { s.UnsetEnv = c.UnsetEnv } - if !s.UnsetEnvAll { - s.UnsetEnvAll = c.UnsetEnvAll + if s.UnsetEnvAll == nil { + s.UnsetEnvAll = &c.UnsetEnvAll } if len(s.ChrootDirs) == 0 || len(c.ChrootDirs) != 0 { s.ChrootDirs = c.ChrootDirs @@ -996,23 +1011,23 @@ func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, start func parseWeightDevices(weightDevs []string) (map[string]specs.LinuxWeightDevice, error) { wd := make(map[string]specs.LinuxWeightDevice) - for _, val := range weightDevs { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) + for _, dev := range weightDevs { + key, val, hasVal := strings.Cut(dev, ":") + if !hasVal { + return nil, fmt.Errorf("bad format: %s", dev) } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) + if !strings.HasPrefix(key, "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", dev) } - weight, err := strconv.ParseUint(split[1], 10, 0) + weight, err := strconv.ParseUint(val, 10, 0) if err != nil { - return nil, fmt.Errorf("invalid weight for device: %s", val) + return nil, fmt.Errorf("invalid weight for device: %s", dev) } if weight > 0 && (weight < 10 || weight > 1000) { - return nil, fmt.Errorf("invalid weight for device: %s", val) + return nil, fmt.Errorf("invalid weight for device: %s", dev) } w := uint16(weight) - wd[split[0]] = specs.LinuxWeightDevice{ + wd[key] = specs.LinuxWeightDevice{ Weight: &w, LeafWeight: nil, } @@ -1022,41 +1037,41 @@ func parseThrottleBPSDevices(bpsDevices []string) (map[string]specs.LinuxThrottleDevice, error) { td := make(map[string]specs.LinuxThrottleDevice) - for _, val := range bpsDevices { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) + for _, dev := range bpsDevices { + key, val, hasVal := strings.Cut(dev, ":") + if !hasVal { + return nil, fmt.Errorf("bad format: %s", dev) } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) + if !strings.HasPrefix(key, "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", dev) } - rate, err := units.RAMInBytes(split[1]) + rate, err := units.RAMInBytes(val) if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", dev) } if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer.
Unit is optional and can be kb, mb, or gb", dev) } - td[split[0]] = specs.LinuxThrottleDevice{Rate: uint64(rate)} + td[key] = specs.LinuxThrottleDevice{Rate: uint64(rate)} } return td, nil } func parseThrottleIOPsDevices(iopsDevices []string) (map[string]specs.LinuxThrottleDevice, error) { td := make(map[string]specs.LinuxThrottleDevice) - for _, val := range iopsDevices { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) + for _, dev := range iopsDevices { + key, val, hasVal := strings.Cut(dev, ":") + if !hasVal { + return nil, fmt.Errorf("bad format: %s", dev) } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) + if !strings.HasPrefix(key, "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", dev) } - rate, err := strconv.ParseUint(split[1], 10, 64) + rate, err := strconv.ParseUint(val, 10, 64) if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val) + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", dev) } - td[split[0]] = specs.LinuxThrottleDevice{Rate: rate} + td[key] = specs.LinuxThrottleDevice{Rate: rate} } return td, nil } @@ -1095,42 +1110,42 @@ func parseSecrets(secrets []string) ([]specgen.Secret, map[string]string, error) } for _, val := range split { - kv := strings.SplitN(val, "=", 2) - if len(kv) < 2 { + name, value, hasValue := strings.Cut(val, "=") + if !hasValue { return nil, nil, fmt.Errorf("option %s must be in form option=value: %w", val, secretParseError) } - switch kv[0] { + switch name { case "source": - source = kv[1] + source = value case "type": if secretType != "" { return nil, nil, fmt.Errorf("cannot set more than one secret type: %w", secretParseError) } - if kv[1] != "mount" && kv[1] != "env" { - return nil, nil, fmt.Errorf("type %s is invalid: %w", kv[1], secretParseError) + if value != "mount" && value != "env" { + return nil, nil, fmt.Errorf("type %s is invalid: %w", value, secretParseError) } - secretType = kv[1] + secretType = value case "target": - target = kv[1] + target = value case "mode": mountOnly = true - mode64, err := strconv.ParseUint(kv[1], 8, 32) + mode64, err := strconv.ParseUint(value, 8, 32) if err != nil { - return nil, nil, fmt.Errorf("mode %s invalid: %w", kv[1], secretParseError) + return nil, nil, fmt.Errorf("mode %s invalid: %w", value, secretParseError) } mode = uint32(mode64) case "uid", "UID": mountOnly = true - uid64, err := strconv.ParseUint(kv[1], 10, 32) + uid64, err := strconv.ParseUint(value, 10, 32) if err != nil { - return nil, nil, fmt.Errorf("UID %s invalid: %w", kv[1], secretParseError) + return nil, nil, fmt.Errorf("UID %s invalid: %w", value, secretParseError) } uid = uint32(uid64) case "gid", "GID": mountOnly = true - gid64, err := strconv.ParseUint(kv[1], 10, 32) + gid64, err := strconv.ParseUint(value, 10, 32) if err != nil { - return nil, nil, fmt.Errorf("GID %s invalid: %w", kv[1], secretParseError) + return nil, nil, fmt.Errorf("GID %s invalid: %w", value, secretParseError) } gid = uint32(gid64) @@ -1195,17 +1210,17 @@ func parseLinuxResourcesDeviceAccess(device string) (specs.LinuxDeviceCgroup, er return specs.LinuxDeviceCgroup{}, fmt.Errorf("invalid device type in device-access-add: %s", devType) } - number := strings.SplitN(value[1], ":", 2) - if number[0] != "*" { - i, err := strconv.ParseUint(number[0], 10, 64) + majorNumber,
minorNumber, hasMinor := strings.Cut(value[1], ":") + if majorNumber != "*" { + i, err := strconv.ParseUint(majorNumber, 10, 64) if err != nil { return specs.LinuxDeviceCgroup{}, err } m := int64(i) major = &m } - if len(number) == 2 && number[1] != "*" { - i, err := strconv.ParseUint(number[1], 10, 64) + if hasMinor && minorNumber != "*" { + i, err := strconv.ParseUint(minorNumber, 10, 64) if err != nil { return specs.LinuxDeviceCgroup{}, err } @@ -1255,11 +1270,11 @@ func GetResources(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions) unifieds := make(map[string]string) for _, unified := range c.CgroupConf { - splitUnified := strings.SplitN(unified, "=", 2) - if len(splitUnified) < 2 { + key, val, hasVal := strings.Cut(unified, "=") + if !hasVal { return nil, errors.New("--cgroup-conf must be formatted KEY=VALUE") } - unifieds[splitUnified[0]] = splitUnified[1] + unifieds[key] = val } if len(unifieds) > 0 { s.ResourceLimits.Unified = unifieds diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/util.go b/vendor/github.com/containers/podman/v5/pkg/specgenutil/util.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/specgenutil/util.go rename to vendor/github.com/containers/podman/v5/pkg/specgenutil/util.go diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/volumes.go b/vendor/github.com/containers/podman/v5/pkg/specgenutil/volumes.go similarity index 82% rename from vendor/github.com/containers/podman/v4/pkg/specgenutil/volumes.go rename to vendor/github.com/containers/podman/v5/pkg/specgenutil/volumes.go index b9a85f18b..c48186716 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgenutil/volumes.go +++ b/vendor/github.com/containers/podman/v5/pkg/specgenutil/volumes.go @@ -9,11 +9,11 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/parse" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/containers/podman/v4/pkg/specgenutilexternal" - "github.com/containers/podman/v4/pkg/util" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/specgen" + "github.com/containers/podman/v5/pkg/specgenutilexternal" + "github.com/containers/podman/v5/pkg/util" spec "github.com/opencontainers/runtime-spec/specs-go" ) @@ -30,6 +30,7 @@ var ( // TODO: handle options parsing/processing via containers/storage/pkg/mount func parseVolumes(rtc *config.Config, volumeFlag, mountFlag, tmpfsFlag []string) ([]spec.Mount, []*specgen.NamedVolume, []*specgen.OverlayVolume, []*specgen.ImageVolume, error) { // Get mounts from the --mounts flag. + // TODO: The runtime config part of this needs to move into pkg/specgen/generate to avoid querying containers.conf on the client. 
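[Editor's note: this and the surrounding hunks all apply the same SplitN to strings.Cut rewrite. Cut (Go 1.18+) returns both halves plus a found flag, which replaces SplitN's slice indexing and len checks and removes the out-of-range risk on kv[1]. A standalone sketch:]

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, opt := range []string{"tmpfs-mode=0755", "newinstance", "src=/dev/fuse"} {
		name, value, hasValue := strings.Cut(opt, "=")
		if hasValue {
			fmt.Printf("option %q carries value %q\n", name, value)
		} else {
			fmt.Printf("option %q is a bare flag\n", name) // SplitN would need a len(kv) == 1 branch here
		}
	}
}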
unifiedMounts, unifiedVolumes, unifiedImageVolumes, err := Mounts(mountFlag, rtc.Mounts()) if err != nil { return nil, nil, nil, nil, err @@ -256,29 +257,29 @@ func parseMountOptions(mountType string, args []string) (*spec.Mount, error) { var setTmpcopyup, setRORW, setSuid, setDev, setExec, setRelabel, setOwnership, setSwap bool mnt := spec.Mount{} - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + for _, arg := range args { + name, value, hasValue := strings.Cut(arg, "=") + switch name { case "bind-nonrecursive": if mountType != define.TypeBind { - return nil, fmt.Errorf("%q option not supported for %q mount types", kv[0], mountType) + return nil, fmt.Errorf("%q option not supported for %q mount types", name, mountType) } mnt.Options = append(mnt.Options, define.TypeBind) case "bind-propagation": if mountType != define.TypeBind { - return nil, fmt.Errorf("%q option not supported for %q mount types", kv[0], mountType) + return nil, fmt.Errorf("%q option not supported for %q mount types", name, mountType) } - if len(kv) == 1 { - return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg) + if !hasValue { + return nil, fmt.Errorf("%v: %w", name, errOptionArg) } - mnt.Options = append(mnt.Options, kv[1]) + mnt.Options = append(mnt.Options, value) case "consistency": // Often used on MACs and mistakenly on Linux platforms. // Since Docker ignores this option so shall we. continue case "idmap": - if len(kv) > 1 { - mnt.Options = append(mnt.Options, fmt.Sprintf("idmap=%s", kv[1])) + if hasValue { + mnt.Options = append(mnt.Options, fmt.Sprintf("idmap=%s", value)) } else { mnt.Options = append(mnt.Options, "idmap") } @@ -294,42 +295,41 @@ func parseMountOptions(mountType string, args []string) (*spec.Mount, error) { // ro=[true|false] // rw // rw=[true|false] - if kv[0] == "readonly" { - kv[0] = "ro" - } - switch len(kv) { - case 1: - mnt.Options = append(mnt.Options, kv[0]) - case 2: - switch strings.ToLower(kv[1]) { + if name == "readonly" { + name = "ro" + } + if hasValue { + switch strings.ToLower(value) { case "true": - mnt.Options = append(mnt.Options, kv[0]) + mnt.Options = append(mnt.Options, name) case "false": // Set the opposite only for rw // ro's opposite is the default - if kv[0] == "rw" { + if name == "rw" { mnt.Options = append(mnt.Options, "ro") } } + } else { + mnt.Options = append(mnt.Options, name) } case "nodev", "dev": if setDev { return nil, fmt.Errorf("cannot pass 'nodev' and 'dev' mnt.Options more than once: %w", errOptionArg) } setDev = true - mnt.Options = append(mnt.Options, kv[0]) + mnt.Options = append(mnt.Options, name) case "noexec", "exec": if setExec { return nil, fmt.Errorf("cannot pass 'noexec' and 'exec' mnt.Options more than once: %w", errOptionArg) } setExec = true - mnt.Options = append(mnt.Options, kv[0]) + mnt.Options = append(mnt.Options, name) case "nosuid", "suid": if setSuid { return nil, fmt.Errorf("cannot pass 'nosuid' and 'suid' mnt.Options more than once: %w", errOptionArg) } setSuid = true - mnt.Options = append(mnt.Options, kv[0]) + mnt.Options = append(mnt.Options, name) case "noswap": if setSwap { return nil, fmt.Errorf("cannot pass 'noswap' mnt.Options more than once: %w", errOptionArg) @@ -338,80 +338,80 @@ func parseMountOptions(mountType string, args []string) (*spec.Mount, error) { return nil, fmt.Errorf("the 'noswap' option is only allowed with rootful tmpfs mounts: %w", errOptionArg) } setSwap = true - mnt.Options = append(mnt.Options, kv[0]) + mnt.Options = append(mnt.Options, name) case "relabel": if 
setRelabel { return nil, fmt.Errorf("cannot pass 'relabel' option more than once: %w", errOptionArg) } setRelabel = true - if len(kv) != 2 { - return nil, fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], util.ErrBadMntOption) + if !hasValue { + return nil, fmt.Errorf("%s mount option must be 'private' or 'shared': %w", name, util.ErrBadMntOption) } - switch kv[1] { + switch value { case "private": mnt.Options = append(mnt.Options, "Z") case "shared": mnt.Options = append(mnt.Options, "z") default: - return nil, fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], util.ErrBadMntOption) + return nil, fmt.Errorf("%s mount option must be 'private' or 'shared': %w", name, util.ErrBadMntOption) } - case "shared", "rshared", "private", "rprivate", "slave", "rslave", "unbindable", "runbindable", "Z", "z": - mnt.Options = append(mnt.Options, kv[0]) + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "unbindable", "runbindable", "Z", "z", "no-dereference": + mnt.Options = append(mnt.Options, name) case "src", "source": if mountType == define.TypeTmpfs { - return nil, fmt.Errorf("%q option not supported for %q mount types", kv[0], mountType) + return nil, fmt.Errorf("%q option not supported for %q mount types", name, mountType) } if mnt.Source != "" { - return nil, fmt.Errorf("cannot pass %q option more than once: %w", kv[0], errOptionArg) + return nil, fmt.Errorf("cannot pass %q option more than once: %w", name, errOptionArg) } - if len(kv) == 1 { - return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg) + if !hasValue { + return nil, fmt.Errorf("%v: %w", name, errOptionArg) } - if len(kv[1]) == 0 { + if len(value) == 0 { return nil, fmt.Errorf("host directory cannot be empty: %w", errOptionArg) } - mnt.Source = kv[1] + mnt.Source = value case "target", "dst", "destination": if mnt.Destination != "" { - return nil, fmt.Errorf("cannot pass %q option more than once: %w", kv[0], errOptionArg) + return nil, fmt.Errorf("cannot pass %q option more than once: %w", name, errOptionArg) } - if len(kv) == 1 { - return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg) + if !hasValue { + return nil, fmt.Errorf("%v: %w", name, errOptionArg) } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + if err := parse.ValidateVolumeCtrDir(value); err != nil { return nil, err } - mnt.Destination = unixPathClean(kv[1]) + mnt.Destination = unixPathClean(value) case "tmpcopyup", "notmpcopyup": if mountType != define.TypeTmpfs { - return nil, fmt.Errorf("%q option not supported for %q mount types", kv[0], mountType) + return nil, fmt.Errorf("%q option not supported for %q mount types", name, mountType) } if setTmpcopyup { return nil, fmt.Errorf("cannot pass 'tmpcopyup' and 'notmpcopyup' mnt.Options more than once: %w", errOptionArg) } setTmpcopyup = true - mnt.Options = append(mnt.Options, kv[0]) + mnt.Options = append(mnt.Options, name) case "tmpfs-mode": if mountType != define.TypeTmpfs { - return nil, fmt.Errorf("%q option not supported for %q mount types", kv[0], mountType) + return nil, fmt.Errorf("%q option not supported for %q mount types", name, mountType) } - if len(kv) == 1 { - return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg) + if !hasValue { + return nil, fmt.Errorf("%v: %w", name, errOptionArg) } - mnt.Options = append(mnt.Options, fmt.Sprintf("mode=%s", kv[1])) + mnt.Options = append(mnt.Options, fmt.Sprintf("mode=%s", value)) case "tmpfs-size": if mountType != define.TypeTmpfs { - return nil, fmt.Errorf("%q option not supported for %q mount 
types", kv[0], mountType) + return nil, fmt.Errorf("%q option not supported for %q mount types", name, mountType) } - if len(kv) == 1 { - return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg) + if !hasValue { + return nil, fmt.Errorf("%v: %w", name, errOptionArg) } - mnt.Options = append(mnt.Options, fmt.Sprintf("size=%s", kv[1])) + mnt.Options = append(mnt.Options, fmt.Sprintf("size=%s", value)) case "U", "chown": if setOwnership { return nil, fmt.Errorf("cannot pass 'U' or 'chown' option more than once: %w", errOptionArg) } - ok, err := validChownFlag(val) + ok, err := validChownFlag(value) if err != nil { return nil, err } @@ -421,16 +421,16 @@ func parseMountOptions(mountType string, args []string) (*spec.Mount, error) { setOwnership = true case "volume-label": if mountType != define.TypeVolume { - return nil, fmt.Errorf("%q option not supported for %q mount types", kv[0], mountType) + return nil, fmt.Errorf("%q option not supported for %q mount types", name, mountType) } return nil, fmt.Errorf("the --volume-label option is not presently implemented") case "volume-opt": if mountType != define.TypeVolume { - return nil, fmt.Errorf("%q option not supported for %q mount types", kv[0], mountType) + return nil, fmt.Errorf("%q option not supported for %q mount types", name, mountType) } - mnt.Options = append(mnt.Options, val) + mnt.Options = append(mnt.Options, arg) default: - return nil, fmt.Errorf("%s: %w", kv[0], util.ErrBadMntOption) + return nil, fmt.Errorf("%s: %w", name, util.ErrBadMntOption) } } if mountType != "glob" && len(mnt.Destination) == 0 { @@ -534,22 +534,22 @@ func getDevptsMount(args []string) (spec.Mount, error) { var setDest bool - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + for _, arg := range args { + name, value, hasValue := strings.Cut(arg, "=") + switch name { case "uid", "gid", "mode", "ptxmode", "newinstance", "max": - newMount.Options = append(newMount.Options, val) + newMount.Options = append(newMount.Options, arg) case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errOptionArg) + if !hasValue { + return newMount, fmt.Errorf("%v: %w", name, errOptionArg) } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + if err := parse.ValidateVolumeCtrDir(value); err != nil { return newMount, err } - newMount.Destination = unixPathClean(kv[1]) + newMount.Destination = unixPathClean(value) setDest = true default: - return newMount, fmt.Errorf("%s: %w", kv[0], util.ErrBadMntOption) + return newMount, fmt.Errorf("%s: %w", name, util.ErrBadMntOption) } } @@ -586,37 +586,37 @@ func getNamedVolume(args []string) (*specgen.NamedVolume, error) { func getImageVolume(args []string) (*specgen.ImageVolume, error) { newVolume := new(specgen.ImageVolume) - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + for _, arg := range args { + name, value, hasValue := strings.Cut(arg, "=") + switch name { case "src", "source": - if len(kv) == 1 { - return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg) + if !hasValue { + return nil, fmt.Errorf("%v: %w", name, errOptionArg) } - newVolume.Source = kv[1] + newVolume.Source = value case "target", "dst", "destination": - if len(kv) == 1 { - return nil, fmt.Errorf("%v: %w", kv[0], errOptionArg) + if !hasValue { + return nil, fmt.Errorf("%v: %w", name, errOptionArg) } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + if err := parse.ValidateVolumeCtrDir(value); err != nil { return nil, err } - 
newVolume.Destination = unixPathClean(kv[1]) + newVolume.Destination = unixPathClean(value) case "rw", "readwrite": - switch kv[1] { + switch value { case "true": newVolume.ReadWrite = true case "false": // Nothing to do. RO is default. default: - return nil, fmt.Errorf("invalid rw value %q: %w", kv[1], util.ErrBadMntOption) + return nil, fmt.Errorf("invalid rw value %q: %w", value, util.ErrBadMntOption) } case "consistency": // Often used on MACs and mistakenly on Linux platforms. // Since Docker ignores this option so shall we. continue default: - return nil, fmt.Errorf("%s: %w", kv[0], util.ErrBadMntOption) + return nil, fmt.Errorf("%s: %w", name, util.ErrBadMntOption) } } @@ -660,24 +660,16 @@ func getTmpfsMounts(tmpfsFlag []string) (map[string]spec.Mount, error) { } // validChownFlag ensures that the U or chown flag is correctly used -func validChownFlag(flag string) (bool, error) { - kv := strings.SplitN(flag, "=", 2) - switch len(kv) { - case 1: - case 2: - // U=[true|false] - switch strings.ToLower(kv[1]) { - case "true": - case "false": - return false, nil - default: - return false, fmt.Errorf("'U' or 'chown' must be set to true or false, instead received %q: %w", kv[1], errOptionArg) - } +func validChownFlag(value string) (bool, error) { + // U=[true|false] + switch { + case strings.EqualFold(value, "true"), value == "": + return true, nil + case strings.EqualFold(value, "false"): + return false, nil default: - return false, fmt.Errorf("badly formatted option %q: %w", flag, errOptionArg) + return false, fmt.Errorf("'U' or 'chown' must be set to true or false, instead received %q: %w", value, errOptionArg) } - - return true, nil } // Use path instead of filepath to preserve Unix style paths on Windows diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutilexternal/mount.go b/vendor/github.com/containers/podman/v5/pkg/specgenutilexternal/mount.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/specgenutilexternal/mount.go rename to vendor/github.com/containers/podman/v5/pkg/specgenutilexternal/mount.go diff --git a/vendor/github.com/containers/podman/v4/pkg/systemd/activation.go b/vendor/github.com/containers/podman/v5/pkg/systemd/activation.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/systemd/activation.go rename to vendor/github.com/containers/podman/v5/pkg/systemd/activation.go diff --git a/vendor/github.com/containers/podman/v4/pkg/systemd/dbus.go b/vendor/github.com/containers/podman/v5/pkg/systemd/dbus.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/systemd/dbus.go rename to vendor/github.com/containers/podman/v5/pkg/systemd/dbus.go index 059d39958..d5613bb85 100644 --- a/vendor/github.com/containers/podman/v4/pkg/systemd/dbus.go +++ b/vendor/github.com/containers/podman/v5/pkg/systemd/dbus.go @@ -7,7 +7,7 @@ import ( "path/filepath" "strconv" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/pkg/rootless" "github.com/coreos/go-systemd/v22/dbus" godbus "github.com/godbus/dbus/v5" "github.com/sirupsen/logrus" diff --git a/vendor/github.com/containers/podman/v4/pkg/systemd/define/const.go b/vendor/github.com/containers/podman/v5/pkg/systemd/define/const.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/systemd/define/const.go rename to vendor/github.com/containers/podman/v5/pkg/systemd/define/const.go diff --git a/vendor/github.com/containers/podman/v4/pkg/systemd/generate/common.go 
b/vendor/github.com/containers/podman/v5/pkg/systemd/generate/common.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/systemd/generate/common.go rename to vendor/github.com/containers/podman/v5/pkg/systemd/generate/common.go index 1ec0ca9fe..ded78aab3 100644 --- a/vendor/github.com/containers/podman/v4/pkg/systemd/generate/common.go +++ b/vendor/github.com/containers/podman/v5/pkg/systemd/generate/common.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate @@ -8,7 +7,7 @@ import ( "strconv" "strings" - "github.com/containers/podman/v4/pkg/systemd/define" + "github.com/containers/podman/v5/pkg/systemd/define" ) // minTimeoutStopSec is the minimal stop timeout for generated systemd units. diff --git a/vendor/github.com/containers/podman/v4/pkg/systemd/generate/containers.go b/vendor/github.com/containers/podman/v5/pkg/systemd/generate/containers.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/systemd/generate/containers.go rename to vendor/github.com/containers/podman/v5/pkg/systemd/generate/containers.go index 1230640f3..ade282430 100644 --- a/vendor/github.com/containers/podman/v4/pkg/systemd/generate/containers.go +++ b/vendor/github.com/containers/podman/v5/pkg/systemd/generate/containers.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate @@ -13,11 +12,11 @@ import ( "text/template" "time" - "github.com/containers/podman/v4/libpod" - libpodDefine "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/systemd/define" - "github.com/containers/podman/v4/version" + "github.com/containers/podman/v5/libpod" + libpodDefine "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/systemd/define" + "github.com/containers/podman/v5/version" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) @@ -472,8 +471,8 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst // because it does try to red the value from the environment if !strings.Contains(env, "=") { for _, containerEnv := range info.containerEnv { - split := strings.SplitN(containerEnv, "=", 2) - if split[0] == env { + key, _, _ := strings.Cut(containerEnv, "=") + if key == env { info.ExtraEnvs = append(info.ExtraEnvs, escapeSystemdArg(containerEnv)) } } diff --git a/vendor/github.com/containers/podman/v4/pkg/systemd/generate/pods.go b/vendor/github.com/containers/podman/v5/pkg/systemd/generate/pods.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/systemd/generate/pods.go rename to vendor/github.com/containers/podman/v5/pkg/systemd/generate/pods.go index cab758779..3c9797329 100644 --- a/vendor/github.com/containers/podman/v4/pkg/systemd/generate/pods.go +++ b/vendor/github.com/containers/podman/v5/pkg/systemd/generate/pods.go @@ -1,5 +1,4 @@ //go:build !remote -// +build !remote package generate @@ -13,10 +12,10 @@ import ( "text/template" "time" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/containers/podman/v4/pkg/systemd/define" - "github.com/containers/podman/v4/version" + "github.com/containers/podman/v5/libpod" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/containers/podman/v5/pkg/systemd/define" + "github.com/containers/podman/v5/version" "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) diff --git 
a/vendor/github.com/containers/podman/v4/pkg/systemd/notifyproxy/notifyproxy.go b/vendor/github.com/containers/podman/v5/pkg/systemd/notifyproxy/notifyproxy.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/systemd/notifyproxy/notifyproxy.go rename to vendor/github.com/containers/podman/v5/pkg/systemd/notifyproxy/notifyproxy.go index 44db55d63..c8ea4748b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/systemd/notifyproxy/notifyproxy.go +++ b/vendor/github.com/containers/podman/v5/pkg/systemd/notifyproxy/notifyproxy.go @@ -1,3 +1,5 @@ +//go:build !windows + package notifyproxy import ( @@ -11,7 +13,7 @@ import ( "syscall" "time" - "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v5/libpod/define" "github.com/coreos/go-systemd/v22/daemon" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" diff --git a/vendor/github.com/containers/podman/v4/pkg/trust/policy.go b/vendor/github.com/containers/podman/v5/pkg/trust/policy.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/trust/policy.go rename to vendor/github.com/containers/podman/v5/pkg/trust/policy.go index b5d8e7a41..1424ffe91 100644 --- a/vendor/github.com/containers/podman/v4/pkg/trust/policy.go +++ b/vendor/github.com/containers/podman/v5/pkg/trust/policy.go @@ -131,8 +131,11 @@ func parseUids(colonDelimitKeys []byte) []string { continue } parseduid := uid - if strings.Contains(uid, "<") && strings.Contains(uid, ">") { - parseduid = strings.SplitN(strings.SplitAfterN(uid, "<", 2)[1], ">", 2)[0] + if ltidx := strings.Index(uid, "<"); ltidx != -1 { + subuid := parseduid[ltidx+1:] + if gtidx := strings.Index(subuid, ">"); gtidx != -1 { + parseduid = subuid[:gtidx] + } } parseduids = append(parseduids, parseduid) } diff --git a/vendor/github.com/containers/podman/v4/pkg/trust/registries.go b/vendor/github.com/containers/podman/v5/pkg/trust/registries.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/trust/registries.go rename to vendor/github.com/containers/podman/v5/pkg/trust/registries.go diff --git a/vendor/github.com/containers/podman/v4/pkg/trust/trust.go b/vendor/github.com/containers/podman/v5/pkg/trust/trust.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/trust/trust.go rename to vendor/github.com/containers/podman/v5/pkg/trust/trust.go index 07d144bc1..2722e8670 100644 --- a/vendor/github.com/containers/podman/v4/pkg/trust/trust.go +++ b/vendor/github.com/containers/podman/v5/pkg/trust/trust.go @@ -4,6 +4,8 @@ import ( "fmt" "sort" "strings" + + "golang.org/x/exp/maps" ) // Policy describes a basic trust policy configuration @@ -51,11 +53,7 @@ func getPolicyShowOutput(policyContentStruct policyContent, systemRegistriesDirP } output = append(output, descriptionsOfPolicyRequirements(policyContentStruct.Default, template, registryConfigs, "", idReader)...) } - // FIXME: This should use x/exp/maps.Keys after we update to Go 1.18. - transports := []string{} - for t := range policyContentStruct.Transports { - transports = append(transports, t) - } + transports := maps.Keys(policyContentStruct.Transports) sort.Strings(transports) for _, transport := range transports { transval := policyContentStruct.Transports[transport] @@ -63,11 +61,7 @@ func getPolicyShowOutput(policyContentStruct policyContent, systemRegistriesDirP transport = "repository" } - // FIXME: This should use x/exp/maps.Keys after we update to Go 1.18. 
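[Editor's note: this trust.go hunk resolves the old FIXME by collecting map keys with golang.org/x/exp/maps, available now that the toolchain is well past Go 1.18. Keys returns the keys in unspecified order, which is why the patch keeps the explicit sort. A standalone sketch:]

package main

import (
	"fmt"
	"sort"

	"golang.org/x/exp/maps"
)

func main() {
	transports := map[string]string{"docker": "a", "dir": "b", "atomic": "c"}
	keys := maps.Keys(transports) // one call instead of a manual range loop
	sort.Strings(keys)            // map iteration order is random, so sort for stable output
	fmt.Println(keys)             // [atomic dir docker]
}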
- scopes := []string{} - for s := range transval { - scopes = append(scopes, s) - } + scopes := maps.Keys(transval) sort.Strings(scopes) for _, repo := range scopes { repoval := transval[repo] diff --git a/vendor/github.com/containers/podman/v4/pkg/util/filters.go b/vendor/github.com/containers/podman/v5/pkg/util/filters.go similarity index 93% rename from vendor/github.com/containers/podman/v4/pkg/util/filters.go rename to vendor/github.com/containers/podman/v5/pkg/util/filters.go index 765b5c4db..d1e33b108 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/filters.go +++ b/vendor/github.com/containers/podman/v5/pkg/util/filters.go @@ -65,9 +65,9 @@ func PrepareFilters(r *http.Request) (*map[string][]string, error) { } filterMap := map[string][]string{} for _, filter := range filtersList { - split := strings.SplitN(filter, "=", 2) - if len(split) > 1 { - filterMap[split[0]] = append(filterMap[split[0]], split[1]) + fname, filter, hasFilter := strings.Cut(filter, "=") + if hasFilter { + filterMap[fname] = append(filterMap[fname], filter) } } return &filterMap, nil diff --git a/vendor/github.com/containers/podman/v4/pkg/util/kube.go b/vendor/github.com/containers/podman/v5/pkg/util/kube.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/util/kube.go rename to vendor/github.com/containers/podman/v5/pkg/util/kube.go index 1a70ed051..7159fa746 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/kube.go +++ b/vendor/github.com/containers/podman/v5/pkg/util/kube.go @@ -15,4 +15,6 @@ const ( VolumeMountOptsAnnotation = "volume.podman.io/mount-options" // Kube annotation for podman volume import source. VolumeImportSourceAnnotation = "volume.podman.io/import-source" + // Kube annotation for podman volume image. + VolumeImageAnnotation = "volume.podman.io/image" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go b/vendor/github.com/containers/podman/v5/pkg/util/mountOpts.go similarity index 90% rename from vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go rename to vendor/github.com/containers/podman/v5/pkg/util/mountOpts.go index a65dcfbba..2ae1fbbed 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go +++ b/vendor/github.com/containers/podman/v5/pkg/util/mountOpts.go @@ -5,8 +5,8 @@ import ( "fmt" "strings" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" ) var ( @@ -28,13 +28,13 @@ type defaultMountOptions struct { // The sourcePath variable, if not empty, contains a bind mount source. 
func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string, error) { var ( - foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap, foundCopy, foundNoSwap bool + foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap, foundCopy, foundNoSwap, foundNoDereference bool ) newOptions := make([]string, 0, len(options)) for _, opt := range options { // Some options have parameters - size, mode - splitOpt := strings.SplitN(opt, "=", 2) + key, _, _ := strings.Cut(opt, "=") // add advanced options such as upperdir=/path and workdir=/path, when overlay is specified if foundOverlay { @@ -47,11 +47,11 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string continue } } - if strings.HasPrefix(splitOpt[0], "subpath") { + if strings.HasPrefix(key, "subpath") { newOptions = append(newOptions, opt) continue } - if strings.HasPrefix(splitOpt[0], "idmap") { + if strings.HasPrefix(key, "idmap") { if foundIdmap { return nil, fmt.Errorf("the 'idmap' option can only be set once: %w", ErrDupeMntOption) } @@ -60,7 +60,7 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string continue } - switch splitOpt[0] { + switch key { case "copy", "nocopy": if foundCopy { return nil, fmt.Errorf("only one of 'nocopy' and 'copy' can be used: %w", ErrDupeMntOption) @@ -148,6 +148,11 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string foundNoSwap = true newOptions = append(newOptions, opt) continue + case "no-dereference": + if foundNoDereference { + return nil, fmt.Errorf("the 'no-dereference' option can only be set once: %w", ErrDupeMntOption) + } + foundNoDereference = true case define.TypeBind, "rbind": if isTmpfs { return nil, fmt.Errorf("the 'bind' and 'rbind' options are not allowed with tmpfs mounts: %w", ErrBadMntOption) @@ -205,13 +210,13 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string } func ParseDriverOpts(option string) (string, string, error) { - token := strings.SplitN(option, "=", 2) - if len(token) != 2 { + _, val, hasVal := strings.Cut(option, "=") + if !hasVal { return "", "", fmt.Errorf("cannot parse driver opts: %w", ErrBadMntOption) } - opt := strings.SplitN(token[1], "=", 2) - if len(opt) != 2 { + optKey, optVal, hasOptVal := strings.Cut(val, "=") + if !hasOptVal { return "", "", fmt.Errorf("cannot parse driver opts: %w", ErrBadMntOption) } - return opt[0], opt[1], nil + return optKey, optVal, nil } diff --git a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_linux.go b/vendor/github.com/containers/podman/v5/pkg/util/mountOpts_linux.go similarity index 100% rename from vendor/github.com/containers/podman/v4/pkg/util/mountOpts_linux.go rename to vendor/github.com/containers/podman/v5/pkg/util/mountOpts_linux.go diff --git a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_other.go b/vendor/github.com/containers/podman/v5/pkg/util/mountOpts_other.go similarity index 87% rename from vendor/github.com/containers/podman/v4/pkg/util/mountOpts_other.go rename to vendor/github.com/containers/podman/v5/pkg/util/mountOpts_other.go index 64b4dd1d9..7a3c3c382 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts_other.go +++ b/vendor/github.com/containers/podman/v5/pkg/util/mountOpts_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package util diff --git 
a/vendor/github.com/containers/podman/v4/pkg/util/utils.go b/vendor/github.com/containers/podman/v5/pkg/util/utils.go similarity index 86% rename from vendor/github.com/containers/podman/v4/pkg/util/utils.go rename to vendor/github.com/containers/podman/v5/pkg/util/utils.go index 679110df2..06ef0da3d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils.go +++ b/vendor/github.com/containers/podman/v5/pkg/util/utils.go @@ -13,25 +13,22 @@ import ( "sort" "strconv" "strings" - "sync" "syscall" "time" "github.com/BurntSushi/toml" "github.com/containers/common/pkg/config" - "github.com/containers/common/pkg/util" "github.com/containers/image/v5/types" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/errorhandling" - "github.com/containers/podman/v4/pkg/namespaces" - "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/signal" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/errorhandling" + "github.com/containers/podman/v5/pkg/namespaces" + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/podman/v5/pkg/signal" "github.com/containers/storage/pkg/directory" - "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/idtools" stypes "github.com/containers/storage/types" securejoin "github.com/cyphar/filepath-securejoin" - ruser "github.com/opencontainers/runc/libcontainer/user" + ruser "github.com/moby/sys/user" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" "golang.org/x/term" @@ -58,14 +55,8 @@ func init() { // Helper function to determine the username/password passed // in the creds string. It could be either or both. func parseCreds(creds string) (string, string) { - if creds == "" { - return "", "" - } - up := strings.SplitN(creds, ":", 2) - if len(up) == 1 { - return up[0], "" - } - return up[0], up[1] + username, password, _ := strings.Cut(creds, ":") + return username, password } // Takes build context and validates `.containerignore` or `.dockerignore` @@ -162,11 +153,6 @@ func ParseRegistryCreds(creds string) (*types.DockerAuthConfig, error) { }, nil } -// StringInSlice is deprecated, use containers/common/pkg/util/StringInSlice -func StringInSlice(s string, sl []string) bool { - return util.StringInSlice(s, sl) -} - // StringMatchRegexSlice determines if a given string matches one of the given regexes, returns bool func StringMatchRegexSlice(s string, re []string) bool { for _, r := range re { @@ -178,17 +164,6 @@ func StringMatchRegexSlice(s string, re []string) bool { return false } -// IndexOfStringInSlice returns the index if a string is in a slice, otherwise -// it returns -1 if the string is not found -func IndexOfStringInSlice(s string, sl []string) int { - for i := range sl { - if sl[i] == s { - return i - } - } - return -1 -} - // ParseSignal parses and validates a signal name or number. func ParseSignal(rawSignal string) (syscall.Signal, error) { // Strip off leading dash, to allow -1 or -HUP @@ -831,6 +806,162 @@ func sortAndMergeConsecutiveMappings(idmap []idtools.IDMap) (finalIDMap []idtool return finalIDMap } +// Extension of idTools.parseAutoTriple that parses idmap triples. +// The triple should be a length 3 string array, containing: +// - Flags and ContainerID +// - HostID +// - Size +// +// parseAutoTriple returns the parsed mapping and any possible error. +// If the error is not-nil, the mapping is not well-defined. 
+// +// idTools.parseAutoTriple is extended here with the following enhancements: +// +// HostID @ syntax: +// ================= +// HostID may use the "@" syntax: The "101001:@1001:1" mapping +// means "take the 1001 id from the parent namespace and map it to 101001" +func parseAutoTriple(spec []string, parentMapping []ruser.IDMap, mapSetting string) (mappings []idtools.IDMap, err error) { + if len(spec[0]) == 0 { + return mappings, fmt.Errorf("invalid empty container id at %s map: %v", mapSetting, spec) + } + var cids, hids, sizes []uint64 + var cid, hid uint64 + var hidIsParent bool + // Parse the container ID, which must be an integer: + cid, err = strconv.ParseUint(spec[0][0:], 10, 32) + if err != nil { + return mappings, fmt.Errorf("parsing id map value %q: %w", spec[0], err) + } + // Parse the host id, which may be integer or @<id> + if len(spec[1]) == 0 { + return mappings, fmt.Errorf("invalid empty host id at %s map: %v", mapSetting, spec) + } + if spec[1][0] != '@' { + hidIsParent = false + hid, err = strconv.ParseUint(spec[1], 10, 32) + } else { + // Parse @<id>, where <id> is an integer corresponding to the parent mapping + hidIsParent = true + hid, err = strconv.ParseUint(spec[1][1:], 10, 32) + } + if err != nil { + return mappings, fmt.Errorf("parsing id map value %q: %w", spec[1], err) + } + // Parse the size of the mapping, which must be an integer + sz, err := strconv.ParseUint(spec[2], 10, 32) + if err != nil { + return mappings, fmt.Errorf("parsing id map value %q: %w", spec[2], err) + } + + if hidIsParent { + for i := uint64(0); i < sz; i++ { + cids = append(cids, cid+i) + mappedID, err := mapIDwithMapping(hid+i, parentMapping, mapSetting) + if err != nil { + return mappings, err + } + hids = append(hids, mappedID) + sizes = append(sizes, 1) + } + } else { + cids = []uint64{cid} + hids = []uint64{hid} + sizes = []uint64{sz} + } + + // Avoid possible integer overflow on 32bit builds + if bits.UintSize == 32 { + for i := range cids { + if cids[i] > math.MaxInt32 || hids[i] > math.MaxInt32 || sizes[i] > math.MaxInt32 { + return mappings, fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"[+ug]uint32:[@]uint32[:uint32]\"] : %q", mapSetting, spec) + } + } + } + for i := range cids { + mappings = append(mappings, idtools.IDMap{ + ContainerID: int(cids[i]), + HostID: int(hids[i]), + Size: int(sizes[i]), + }) + } + return mappings, nil +} + +// Extension of idTools.ParseIDMap that parses idmap triples from string. +// This extension accepts additional flags that control how the mapping is done +func parseAutoIDMap(mapSpec string, mapSetting string, parentMapping []ruser.IDMap) (idmap []idtools.IDMap, err error) { + stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"uint32:[@]uint32[:uint32]\"] : %q", mapSetting, mapSpec) + idSpec := strings.Split(mapSpec, ":") + // if it's a length-2 list assume the size is 1: + if len(idSpec) == 2 { + idSpec = append(idSpec, "1") + } + if len(idSpec) != 3 { + return nil, stdErr + } + // Parse this mapping: + mappings, err := parseAutoTriple(idSpec, parentMapping, mapSetting) + if err != nil { + return nil, err + } + idmap = sortAndMergeConsecutiveMappings(mappings) + return idmap, nil +} + +// GetAutoOptions returns an AutoUserNsOptions with the settings to automatically set up +// a user namespace.
+func GetAutoOptions(n namespaces.UsernsMode) (*stypes.AutoUserNsOptions, error) { + mode, opts, hasOpts := strings.Cut(string(n), ":") + if mode != "auto" { + return nil, fmt.Errorf("wrong user namespace mode") + } + options := stypes.AutoUserNsOptions{} + if !hasOpts { + return &options, nil + } + + parentUIDMap, parentGIDMap, err := rootless.GetAvailableIDMaps() + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // The kernel-provided files only exist if user namespaces are supported + logrus.Debugf("User or group ID mappings not available: %s", err) + } else { + return nil, err + } + } + + for _, o := range strings.Split(opts, ",") { + key, val, hasVal := strings.Cut(o, "=") + if !hasVal { + return nil, fmt.Errorf("invalid option specified: %q", o) + } + switch key { + case "size": + s, err := strconv.ParseUint(val, 10, 32) + if err != nil { + return nil, err + } + options.Size = uint32(s) + case "uidmapping": + mapping, err := parseAutoIDMap(val, "UID", parentUIDMap) + if err != nil { + return nil, err + } + options.AdditionalUIDMappings = append(options.AdditionalUIDMappings, mapping...) + case "gidmapping": + mapping, err := parseAutoIDMap(val, "GID", parentGIDMap) + if err != nil { + return nil, err + } + options.AdditionalGIDMappings = append(options.AdditionalGIDMappings, mapping...) + default: + return nil, fmt.Errorf("unknown option specified: %q", key) + } + } + return &options, nil +} + // ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*stypes.IDMappingOptions, error) { options := stypes.IDMappingOptions{ @@ -843,7 +974,7 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin options.HostUIDMapping = false options.HostGIDMapping = false options.AutoUserNs = true - opts, err := mode.GetAutoOptions() + opts, err := GetAutoOptions(mode) if err != nil { return nil, err } @@ -920,13 +1051,6 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin return &options, nil } -var ( - rootlessConfigHomeDirOnce sync.Once - rootlessConfigHomeDir string - rootlessRuntimeDirOnce sync.Once - rootlessRuntimeDir string -) - type tomlOptionsConfig struct { MountProgram string `toml:"mount_program"` } @@ -947,9 +1071,9 @@ func getTomlStorage(storeOptions *stypes.StoreOptions) *tomlConfig { config.Storage.RunRoot = storeOptions.RunRoot config.Storage.GraphRoot = storeOptions.GraphRoot for _, i := range storeOptions.GraphDriverOptions { - s := strings.SplitN(i, "=", 2) - if s[0] == "overlay.mount_program" && len(s) == 2 { - config.Storage.Options.MountProgram = s[1] + program, hasPrefix := strings.CutPrefix(i, "overlay.mount_program=") + if hasPrefix { + config.Storage.Options.MountProgram = program } } @@ -1035,10 +1159,6 @@ func ExitCode(err error) int { return 126 } -func GetIdentityPath(name string) string { - return filepath.Join(homedir.Get(), ".ssh", name) -} - func Tmpdir() string { tmpdir := os.Getenv("TMPDIR") if tmpdir == "" { @@ -1212,10 +1332,10 @@ func ParseRestartPolicy(policy string) (string, uint, error) { return policyType, retriesUint, nil } -// ConvertTimeout converts negative timeout to MaxInt, which indicates approximately infinity, waiting to stop containers +// ConvertTimeout converts negative timeout to MaxUint32, which indicates approximately infinity, waiting to stop containers func ConvertTimeout(timeout int) uint { if timeout < 0 { - return math.MaxInt + 
return math.MaxUint32 } return uint(timeout) } diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go b/vendor/github.com/containers/podman/v5/pkg/util/utils_darwin.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go rename to vendor/github.com/containers/podman/v5/pkg/util/utils_darwin.go index 3a2e587df..adefa703c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_darwin.go +++ b/vendor/github.com/containers/podman/v5/pkg/util/utils_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package util diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_freebsd.go b/vendor/github.com/containers/podman/v5/pkg/util/utils_freebsd.go similarity index 96% rename from vendor/github.com/containers/podman/v4/pkg/util/utils_freebsd.go rename to vendor/github.com/containers/podman/v5/pkg/util/utils_freebsd.go index 621bb436e..3da132821 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_freebsd.go +++ b/vendor/github.com/containers/podman/v5/pkg/util/utils_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package util diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go b/vendor/github.com/containers/podman/v5/pkg/util/utils_linux.go similarity index 98% rename from vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go rename to vendor/github.com/containers/podman/v5/pkg/util/utils_linux.go index 3cb080481..eadc8d3ba 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go +++ b/vendor/github.com/containers/podman/v5/pkg/util/utils_linux.go @@ -10,8 +10,8 @@ import ( "strings" "syscall" - "github.com/containers/podman/v4/libpod/define" - "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v5/libpod/define" + "github.com/containers/podman/v5/pkg/rootless" "github.com/containers/psgo" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" diff --git a/vendor/github.com/containers/podman/v5/pkg/util/utils_supported.go b/vendor/github.com/containers/podman/v5/pkg/util/utils_supported.go new file mode 100644 index 000000000..024c93dac --- /dev/null +++ b/vendor/github.com/containers/podman/v5/pkg/util/utils_supported.go @@ -0,0 +1,39 @@ +//go:build !windows + +package util + +// TODO once rootless function is consolidated under libpod, we +// should work to take darwin from this + +import ( + "path/filepath" + + "github.com/containers/podman/v5/pkg/rootless" + "github.com/containers/storage/pkg/homedir" +) + +// GetRootlessRuntimeDir returns the runtime directory when running as non root +func GetRootlessRuntimeDir() (string, error) { + if !rootless.IsRootless() { + return "", nil + } + return homedir.GetRuntimeDir() +} + +// GetRootlessConfigHomeDir returns the config home directory when running as non root +func GetRootlessConfigHomeDir() (string, error) { + return homedir.GetConfigHome() +} + +// GetRootlessPauseProcessPidPath returns the path to the file that holds the pid for +// the pause process. +func GetRootlessPauseProcessPidPath() (string, error) { + runtimeDir, err := GetRootlessRuntimeDir() + if err != nil { + return "", err + } + // Note this path must be kept in sync with pkg/rootless/rootless_linux.go + // We only want a single pause process per user, so we do not want to use + // the tmpdir which can be changed via --tmpdir. 
+ return filepath.Join(runtimeDir, "libpod", "tmp", "pause.pid"), nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go b/vendor/github.com/containers/podman/v5/pkg/util/utils_unsupported.go similarity index 88% rename from vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go rename to vendor/github.com/containers/podman/v5/pkg/util/utils_unsupported.go index 26fb7adf9..8a77e189d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_unsupported.go +++ b/vendor/github.com/containers/podman/v5/pkg/util/utils_unsupported.go @@ -1,5 +1,4 @@ //go:build darwin || windows || freebsd -// +build darwin windows freebsd package util diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go b/vendor/github.com/containers/podman/v5/pkg/util/utils_windows.go similarity index 92% rename from vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go rename to vendor/github.com/containers/podman/v5/pkg/util/utils_windows.go index 1e48eb572..0fbe9c260 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils_windows.go +++ b/vendor/github.com/containers/podman/v5/pkg/util/utils_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package util @@ -30,8 +29,8 @@ func GetRootlessPauseProcessPidPath() (string, error) { return "", fmt.Errorf("GetRootlessPauseProcessPidPath: %w", errNotImplemented) } -// GetRuntimeDir returns the runtime directory -func GetRuntimeDir() (string, error) { +// GetRootlessRuntimeDir returns the runtime directory +func GetRootlessRuntimeDir() (string, error) { data, err := homedir.GetDataHome() if err != nil { return "", err diff --git a/vendor/github.com/containers/podman/v4/utils/ports.go b/vendor/github.com/containers/podman/v5/utils/ports.go similarity index 100% rename from vendor/github.com/containers/podman/v4/utils/ports.go rename to vendor/github.com/containers/podman/v5/utils/ports.go diff --git a/vendor/github.com/containers/podman/v4/utils/utils.go b/vendor/github.com/containers/podman/v5/utils/utils.go similarity index 57% rename from vendor/github.com/containers/podman/v4/utils/utils.go rename to vendor/github.com/containers/podman/v5/utils/utils.go index f73672c7a..bfca83914 100644 --- a/vendor/github.com/containers/podman/v4/utils/utils.go +++ b/vendor/github.com/containers/podman/v5/utils/utils.go @@ -2,20 +2,16 @@ package utils import ( "bytes" - "crypto/rand" "fmt" "io" "os" "os/exec" "strconv" "strings" - "sync" "time" - "github.com/containers/common/pkg/cgroups" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" - "github.com/godbus/dbus/v5" "github.com/sirupsen/logrus" "github.com/vbauerster/mpb/v8" "github.com/vbauerster/mpb/v8/decor" @@ -60,7 +56,7 @@ func UntarToFileSystem(dest string, tarball *os.File, options *archive.TarOption return archive.Untar(tarball, dest, options) } -// Creates a new tar file and wrties bytes from io.ReadCloser +// Creates a new tar file and writes bytes from io.ReadCloser func CreateTarFromSrc(source string, dest string) error { file, err := os.Create(dest) if err != nil { @@ -77,6 +73,7 @@ func TarToFilesystem(source string, tarball *os.File) error { if err != nil { return err } + defer tb.Close() _, err = io.Copy(tarball, tb) if err != nil { return err @@ -98,6 +95,7 @@ func TarChrootToFilesystem(source string, tarball *os.File) error { if err != nil { return err } + defer tb.Close() _, err = io.Copy(tarball, tb) if err != nil { return err @@ -131,121 +129,6 @@ func 
RemoveScientificNotationFromFloat(x float64) (float64, error) { return result, nil } -var ( - runsOnSystemdOnce sync.Once - runsOnSystemd bool -) - -// RunsOnSystemd returns whether the system is using systemd -func RunsOnSystemd() bool { - runsOnSystemdOnce.Do(func() { - // per sd_booted(3), check for this dir - fd, err := os.Stat("/run/systemd/system") - runsOnSystemd = err == nil && fd.IsDir() - }) - return runsOnSystemd -} - -func moveProcessPIDFileToScope(pidPath, slice, scope string) error { - data, err := os.ReadFile(pidPath) - if err != nil { - // do not raise an error if the file doesn't exist - if os.IsNotExist(err) { - return nil - } - return fmt.Errorf("cannot read pid file: %w", err) - } - pid, err := strconv.ParseUint(string(data), 10, 0) - if err != nil { - return fmt.Errorf("cannot parse pid file %s: %w", pidPath, err) - } - - return moveProcessToScope(int(pid), slice, scope) -} - -func moveProcessToScope(pid int, slice, scope string) error { - err := RunUnderSystemdScope(pid, slice, scope) - // If the PID is not valid anymore, do not return an error. - if dbusErr, ok := err.(dbus.Error); ok { - if dbusErr.Name == "org.freedesktop.DBus.Error.UnixProcessIdUnknown" { - return nil - } - } - return err -} - -// MoveRootlessNetnsSlirpProcessToUserSlice moves the slirp4netns process for the rootless netns -// into a different scope so that systemd does not kill it with a container. -func MoveRootlessNetnsSlirpProcessToUserSlice(pid int) error { - randBytes := make([]byte, 4) - _, err := rand.Read(randBytes) - if err != nil { - return err - } - return moveProcessToScope(pid, "user.slice", fmt.Sprintf("rootless-netns-%x.scope", randBytes)) -} - -// MovePauseProcessToScope moves the pause process used for rootless mode to keep the namespaces alive to -// a separate scope. -func MovePauseProcessToScope(pausePidPath string) { - var err error - - for i := 0; i < 10; i++ { - randBytes := make([]byte, 4) - _, err = rand.Read(randBytes) - if err != nil { - logrus.Errorf("failed to read random bytes: %v", err) - continue - } - err = moveProcessPIDFileToScope(pausePidPath, "user.slice", fmt.Sprintf("podman-pause-%x.scope", randBytes)) - if err == nil { - return - } - } - - if err != nil { - unified, err2 := cgroups.IsCgroup2UnifiedMode() - if err2 != nil { - logrus.Warnf("Failed to detect if running with cgroup unified: %v", err) - } - if RunsOnSystemd() && unified { - logrus.Warnf("Failed to add pause process to systemd sandbox cgroup: %v", err) - } else { - logrus.Debugf("Failed to add pause process to systemd sandbox cgroup: %v", err) - } - } -} - -var ( - maybeMoveToSubCgroupSync sync.Once - maybeMoveToSubCgroupSyncErr error -) - -// MaybeMoveToSubCgroup moves the current process in a sub cgroup when -// it is running in the root cgroup on a system that uses cgroupv2. -func MaybeMoveToSubCgroup() error { - maybeMoveToSubCgroupSync.Do(func() { - unifiedMode, err := cgroups.IsCgroup2UnifiedMode() - if err != nil { - maybeMoveToSubCgroupSyncErr = err - return - } - if !unifiedMode { - maybeMoveToSubCgroupSyncErr = nil - return - } - cgroup, err := GetOwnCgroup() - if err != nil { - maybeMoveToSubCgroupSyncErr = err - return - } - if cgroup == "/" { - maybeMoveToSubCgroupSyncErr = MoveUnderCgroupSubtree("init") - } - }) - return maybeMoveToSubCgroupSyncErr -} - // GuardedRemoveAll functions much like os.RemoveAll but // will not delete certain catastrophic paths. 
func GuardedRemoveAll(path string) error { diff --git a/vendor/github.com/containers/podman/v4/version/rawversion/version.go b/vendor/github.com/containers/podman/v5/version/rawversion/version.go similarity index 90% rename from vendor/github.com/containers/podman/v4/version/rawversion/version.go rename to vendor/github.com/containers/podman/v5/version/rawversion/version.go index fd3552c46..db7d47464 100644 --- a/vendor/github.com/containers/podman/v4/version/rawversion/version.go +++ b/vendor/github.com/containers/podman/v5/version/rawversion/version.go @@ -7,4 +7,4 @@ package rawversion // // NOTE: remember to bump the version at the top of the top-level README.md // file when this is bumped. -const RawVersion = "4.9.3" +const RawVersion = "5.0.0" diff --git a/vendor/github.com/containers/podman/v4/version/version.go b/vendor/github.com/containers/podman/v5/version/version.go similarity index 96% rename from vendor/github.com/containers/podman/v4/version/version.go rename to vendor/github.com/containers/podman/v5/version/version.go index cf97e0229..5bd86b1af 100644 --- a/vendor/github.com/containers/podman/v4/version/version.go +++ b/vendor/github.com/containers/podman/v5/version/version.go @@ -2,7 +2,7 @@ package version import ( "github.com/blang/semver/v4" - "github.com/containers/podman/v4/version/rawversion" + "github.com/containers/podman/v5/version/rawversion" ) type ( diff --git a/vendor/github.com/containers/psgo/README.md b/vendor/github.com/containers/psgo/README.md index e54423ca7..aa26c170f 100644 --- a/vendor/github.com/containers/psgo/README.md +++ b/vendor/github.com/containers/psgo/README.md @@ -1,4 +1,4 @@ -[![GoDoc](https://godoc.org/github.com/containers/psgo?status.svg)](https://godoc.org/github.com/containers/psgo) [![Build Status](https://travis-ci.org/containers/psgo.svg?branch=master)](https://travis-ci.org/containers/psgo) +[![GoDoc](https://godoc.org/github.com/containers/psgo?status.svg)](https://godoc.org/github.com/containers/psgo) # psgo A ps(1) AIX-format compatible golang library extended with various descriptors useful for displaying container-related data. diff --git a/vendor/github.com/containers/psgo/internal/proc/status.go b/vendor/github.com/containers/psgo/internal/proc/status.go index 1d2247cbd..1b60974a0 100644 --- a/vendor/github.com/containers/psgo/internal/proc/status.go +++ b/vendor/github.com/containers/psgo/internal/proc/status.go @@ -156,6 +156,9 @@ type Status struct { // provided only if the kernel was built with the CONFIG_SECCOMP kernel // configu- ration option enabled. Seccomp string + // SeccompFilters: Amount of filters attached to the process. + // (since Linux 5.9) + SeccompFilters string // Cpus_allowed: Mask of CPUs on which this process may run // (since Linux 2.6.24, see cpuset(7)). 
CpusAllowed string @@ -379,6 +382,8 @@ func parseStatus(pid string, lines []string) (*Status, error) { s.NoNewPrivs = fields[1] case "Seccomp:": s.Seccomp = fields[1] + case "Seccomp_filters:": + s.SeccompFilters = fields[1] case "Cpus_allowed:": s.CpusAllowed = fields[1] case "Cpus_allowed_list:": diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index c41dd5da2..13bc20e7e 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -17,13 +17,13 @@ env: #### #### Cache-image names to test with (double-quotes around names are critical) ### - FEDORA_NAME: "fedora-39ß" + FEDORA_NAME: "fedora-39" DEBIAN_NAME: "debian-13" # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20231004t194547z-f39f38d13" + IMAGE_SUFFIX: "c20240102t155643z-f39f38d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -167,7 +167,7 @@ vendor_task: cross_task: container: - image: golang:1.19 + image: golang:1.20 build_script: make cross @@ -181,6 +181,6 @@ success_task: - vendor - cross container: - image: golang:1.19 + image: golang:1.20 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 77189d49e..8461c0901 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -41,7 +41,7 @@ containers-storage: ## build using gc on the host $(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage codespell: - codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w + codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w binary local-binary: containers-storage diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index ba0a71911..3f4830156 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.51.0 +1.53.0 diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index ab32d652e..aa99fdead 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -73,6 +73,13 @@ type ApplyDiffOpts struct { ForceMask *os.FileMode } +// ApplyDiffWithDifferOpts contains optional arguments for ApplyDiffWithDiffer methods. +type ApplyDiffWithDifferOpts struct { + ApplyDiffOpts + + Flags map[string]interface{} +} + // InitFunc initializes the storage driver. type InitFunc func(homedir string, options Options) (Driver, error) @@ -189,6 +196,8 @@ type DriverWithDifferOutput struct { BigData map[string][]byte TarSplit []byte TOCDigest digest.Digest + // RootDirMode is the mode of the root directory of the layer, if specified. + RootDirMode *os.FileMode // Artifacts is a collection of additional artifacts // generated by the differ that the storage driver can use. 
Artifacts map[string]interface{} @@ -205,10 +214,26 @@ const ( DifferOutputFormatFlat ) +type DifferFsVerity int + +const ( + // DifferFsVerityDisabled means no fs-verity is used + DifferFsVerityDisabled = iota + + // DifferFsVerityEnabled means fs-verity is used when supported + DifferFsVerityEnabled + + // DifferFsVerityRequired means fs-verity is required + DifferFsVerityRequired +) + // DifferOptions overrides how the differ work type DifferOptions struct { // Format defines the destination directory layout format Format DifferOutputFormat + + // UseFsVerity defines whether fs-verity is used + UseFsVerity DifferFsVerity } // Differ defines the interface for using a custom differ. @@ -223,9 +248,9 @@ type DriverWithDiffer interface { Driver // ApplyDiffWithDiffer applies the changes using the callback function. // If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory. - ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, differ Differ) (output DriverWithDifferOutput, err error) - // ApplyDiffFromStagingDirectory applies the changes using the specified staging directory. - ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error + ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error) + // ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory. + ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors CleanupStagingDirectory(stagingDirectory string) error // DifferTarget gets the location where files are stored for the layer. 
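The DriverWithDiffer changes above fold what used to be positional arguments into structs: options now travel in ApplyDiffWithDifferOpts, and the staging directory location rides inside the DriverWithDifferOutput (the overlay driver reads it back as diffOutput.Target further down in this patch). A minimal caller sketch of the resulting two-step flow, under those assumptions; applyLayer, drv, and differ are illustrative names, not part of this patch:

package example

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

// applyLayer sketches the two-step flow implied by the updated interface:
// stage the diff with an empty id, then commit it from the staging output.
func applyLayer(drv graphdriver.DriverWithDiffer, differ graphdriver.Differ, id, parent string) error {
	// Zero-value options here; real callers would fill in the embedded
	// ApplyDiffOpts (ID mappings, mount label, ...) and any Flags.
	opts := &graphdriver.ApplyDiffWithDifferOpts{}

	// An empty id asks the driver to create a staging directory; its
	// location comes back in out.Target rather than as a separate value.
	out, err := drv.ApplyDiffWithDiffer("", parent, opts, differ)
	if err != nil {
		return err
	}

	// Commit the staged diff to the layer with the given id.
	if err := drv.ApplyDiffFromStagingDirectory(id, parent, &out, opts); err != nil {
		// Per the interface comment, the staging directory can be
		// cleaned up on errors.
		if cErr := drv.CleanupStagingDirectory(out.Target); cErr != nil {
			return fmt.Errorf("applying staged diff: %w (staging cleanup also failed: %v)", err, cErr)
		}
		return err
	}
	return nil
}

The overlay implementation later in this patch enforces the same contract: ApplyDiffFromStagingDirectory rejects any output whose Target does not live under the per-id staging directory before renaming it into place.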
diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go similarity index 63% rename from vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go rename to vendor/github.com/containers/storage/drivers/overlay/composefs.go index 26dd36866..baa9d7bef 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go @@ -1,5 +1,5 @@ -//go:build linux && composefs && cgo -// +build linux,composefs,cgo +//go:build linux && cgo +// +build linux,cgo package overlay @@ -7,15 +7,13 @@ import ( "encoding/binary" "errors" "fmt" - "io/fs" "os" "os/exec" "path/filepath" "sync" - "syscall" - "unsafe" "github.com/containers/storage/pkg/chunked/dump" + "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/loopback" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" @@ -34,77 +32,6 @@ func getComposeFsHelper() (string, error) { return composeFsHelperPath, composeFsHelperErr } -func composeFsSupported() bool { - _, err := getComposeFsHelper() - return err == nil -} - -func enableVerity(description string, fd int) error { - enableArg := unix.FsverityEnableArg{ - Version: 1, - Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256, - Block_size: 4096, - } - - _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg))) - if e1 != 0 && !errors.Is(e1, unix.EEXIST) { - return fmt.Errorf("failed to enable verity for %q: %w", description, e1) - } - return nil -} - -type verityDigest struct { - Fsv unix.FsverityDigest - Buf [64]byte -} - -func measureVerity(description string, fd int) (string, error) { - var digest verityDigest - digest.Fsv.Size = 64 - _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) - if e1 != 0 { - return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) - } - return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil -} - -func enableVerityRecursive(root string) (map[string]string, error) { - digests := make(map[string]string) - walkFn := func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - if !d.Type().IsRegular() { - return nil - } - - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - - if err := enableVerity(path, int(f.Fd())); err != nil { - return err - } - - verity, err := measureVerity(path, int(f.Fd())) - if err != nil { - return err - } - - relPath, err := filepath.Rel(root, path) - if err != nil { - return err - } - - digests[relPath] = verity - return nil - } - err := filepath.WalkDir(root, walkFn) - return digests, err -} - func getComposefsBlob(dataDir string) string { return filepath.Join(dataDir, "composefs.blob") } @@ -156,7 +83,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com return err } - if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { + if err := fsverity.EnableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { logrus.Warningf("%s", err) } diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go deleted file 
mode 100644 index 347e4d35c..000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !linux || !composefs || !cgo -// +build !linux !composefs !cgo - -package overlay - -import ( - "fmt" -) - -func composeFsSupported() bool { - return false -} - -func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { - return fmt.Errorf("composefs is not supported") -} - -func mountComposefsBlob(dataDir, mountPoint string) error { - return fmt.Errorf("composefs is not supported") -} - -func enableVerityRecursive(path string) (map[string]string, error) { - return nil, fmt.Errorf("composefs is not supported") -} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 04ecf871f..f007aa943 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -82,7 +82,8 @@ const ( lowerFile = "lower" maxDepth = 500 - tocArtifact = "toc" + tocArtifact = "toc" + fsVerityDigestsArtifact = "fs-verity-digests" // idLength represents the number of random characters // which can be used to create the unique link identifier @@ -105,6 +106,7 @@ type overlayOptions struct { mountOptions string ignoreChownErrors bool forceMask *os.FileMode + useComposefs bool } // Driver contains information about the home directory and the list of active mounts that are created using this driver. @@ -122,6 +124,7 @@ type Driver struct { supportsDType bool supportsVolatile *bool usingMetacopy bool + usingComposefs bool supportsIDMappedMounts *bool } @@ -293,7 +296,7 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { // a bunch of network file systems... case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs, graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS, - graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX, + graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX, graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP, graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS, graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS, @@ -307,16 +310,6 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool { // If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error. // If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned. func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { - // If custom --imagestore is selected never - // ditch the original graphRoot, instead add it as - // additionalImageStore so its images can still be - // read and used. 
- if options.ImageStore != "" { - graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore) - options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore) - // complete base name with driver name included - options.ImageStore = filepath.Join(options.ImageStore, "overlay") - } opts, err := parseOptions(options.DriverOptions) if err != nil { return nil, err @@ -387,6 +380,22 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) } } + if opts.useComposefs { + if unshare.IsRootless() { + return nil, fmt.Errorf("composefs is not supported in user namespaces") + } + supportsDataOnly, err := supportsDataOnlyLayersCached(home, runhome) + if err != nil { + return nil, err + } + if !supportsDataOnly { + return nil, fmt.Errorf("composefs is not supported on this kernel: %w", graphdriver.ErrIncompatibleFS) + } + if _, err := getComposeFsHelper(); err != nil { + return nil, fmt.Errorf("composefs helper program not found: %w", err) + } + } + var usingMetacopy bool var supportsDType bool var supportsVolatile *bool @@ -448,6 +457,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) supportsDType: supportsDType, usingMetacopy: usingMetacopy, supportsVolatile: supportsVolatile, + usingComposefs: opts.useComposefs, options: *opts, } @@ -555,6 +565,12 @@ func parseOptions(options []string) (*overlayOptions, error) { withReference: withReference, }) } + case "use_composefs": + logrus.Debugf("overlay: use_composefs=%s", val) + o.useComposefs, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } case "mount_program": logrus.Debugf("overlay: mount_program=%s", val) if val != "" { @@ -782,7 +798,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI } func (d *Driver) useNaiveDiff() bool { - if d.useComposeFs() { + if d.usingComposefs { return true } @@ -837,22 +853,15 @@ func (d *Driver) Status() [][2]string { // Metadata returns meta data about the overlay driver such as // LowerDir, UpperDir, WorkDir and MergeDir used to store data. func (d *Driver) Metadata(id string) (map[string]string, error) { - dir, imagestore, _ := d.dir2(id) + dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } - workDirBase := dir - if imagestore != "" { - if _, err := os.Stat(dir); err != nil { - return nil, err - } - workDirBase = imagestore - } metadata := map[string]string{ - "WorkDir": path.Join(workDirBase, "work"), - "MergedDir": path.Join(workDirBase, "merged"), - "UpperDir": path.Join(workDirBase, "diff"), + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), } lowerDirs, err := d.getLowerDirs(id) @@ -870,7 +879,7 @@ func (d *Driver) Metadata(id string) (map[string]string, error) { // is being shutdown. For now, we just have to unmount the bind mounted // we had created. 
func (d *Driver) Cleanup() error { - _ = os.RemoveAll(d.getStagingDir()) + _ = os.RemoveAll(filepath.Join(d.home, stagingDir)) return mount.Unmount(d.home) } @@ -966,8 +975,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr return d.create(id, parent, opts, true) } -func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) { - dir, imageStore, _ := d.dir2(id) +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) { + dir, homedir, _ := d.dir2(id, readOnly) + + disableQuota := readOnly uidMaps := d.uidMaps gidMaps := d.gidMaps @@ -978,7 +989,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } // Make the link directory if it does not exist - if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil { + if err := idtools.MkdirAllAs(path.Join(homedir, linkDir), 0o755, 0, 0); err != nil { return err } @@ -995,20 +1006,8 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil { return err } - workDirBase := dir - if imageStore != "" { - workDirBase = imageStore - if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil { - return err - } - } if parent != "" { - parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) - // If parentBase path is additional image store, select the image contained in parentBase. - // See https://github.com/containers/podman/issues/19748 - if parentImageStore != "" && !inAdditionalStore { - parentBase = parentImageStore - } + parentBase := d.dir(parent) st, err := system.Stat(filepath.Join(parentBase, "diff")) if err != nil { return err @@ -1029,11 +1028,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil { return err } - if imageStore != "" { - if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil { - return err - } - } defer func() { // Clean up on failure @@ -1041,11 +1035,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err2 := os.RemoveAll(dir); err2 != nil { logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2) } - if imageStore != "" { - if err2 := os.RemoveAll(workDirBase); err2 != nil { - logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2) - } - } } }() @@ -1068,11 +1057,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := d.quotaCtl.SetQuota(dir, quota); err != nil { return err } - if imageStore != "" { - if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil { - return err - } - } } perms := defaultPerms @@ -1081,12 +1065,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable } if parent != "" { - parentBase, parentImageStore, inAdditionalStore := d.dir2(parent) - // If parentBase path is additional image store, select the image contained in parentBase. 
- // See https://github.com/containers/podman/issues/19748 - if parentImageStore != "" && !inAdditionalStore { - parentBase = parentImageStore - } + parentBase := d.dir(parent) st, err := system.Stat(filepath.Join(parentBase, "diff")) if err != nil { return err @@ -1094,17 +1073,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable perms = os.FileMode(st.Mode()) } - if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil { return err } lid := generateID(idLength) linkBase := path.Join("..", id, "diff") - if imageStore != "" { - linkBase = path.Join(imageStore, "diff") - } - if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil { + if err := os.Symlink(linkBase, path.Join(homedir, linkDir, lid)); err != nil { return err } @@ -1113,10 +1089,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable return err } - if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil { return err } - if err := idtools.MkdirAs(path.Join(workDirBase, "merged"), 0o700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil { return err } @@ -1198,26 +1174,39 @@ func (d *Driver) getLower(parent string) (string, error) { } func (d *Driver) dir(id string) string { - p, _, _ := d.dir2(id) + p, _, _ := d.dir2(id, false) return p } -func (d *Driver) dir2(id string) (string, string, bool) { - newpath := path.Join(d.home, id) - imageStore := "" +func (d *Driver) getAllImageStores() []string { + additionalImageStores := d.AdditionalImageStores() if d.imageStore != "" { - imageStore = path.Join(d.imageStore, id) + additionalImageStores = append([]string{d.imageStore}, additionalImageStores...) 
+ } + return additionalImageStores +} + +func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) { + var homedir string + + if useImageStore && d.imageStore != "" { + homedir = path.Join(d.imageStore, d.name) + } else { + homedir = d.home } + + newpath := path.Join(homedir, id) + if _, err := os.Stat(newpath); err != nil { - for _, p := range d.AdditionalImageStores() { + for _, p := range d.getAllImageStores() { l := path.Join(p, d.name, id) _, err = os.Stat(l) if err == nil { - return l, imageStore, true + return l, homedir, true } } } - return newpath, imageStore, false + return newpath, homedir, false } func (d *Driver) getLowerDirs(id string) ([]string, error) { @@ -1427,14 +1416,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { } func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { - dir, imageStore, inAdditionalStore := d.dir2(id) + dir, _, inAdditionalStore := d.dir2(id, false) if _, err := os.Stat(dir); err != nil { return "", err } - workDirBase := dir - if imageStore != "" { - workDirBase = imageStore - } + readWrite := !inAdditionalStore if !d.SupportsShifting() || options.DisableShifting { @@ -1539,7 +1525,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO }() composeFsLayers := []string{} - composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers") + composeFsLayersDir := filepath.Join(dir, "composefs-layers") maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) { composefsBlob := d.getComposefsData(lowerID) _, err = os.Stat(composefsBlob) @@ -1573,7 +1559,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return dest, nil } - diffDir := path.Join(workDirBase, "diff") + diffDir := path.Join(dir, "diff") if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil { return "", err @@ -1591,7 +1577,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO lower := "" newpath := path.Join(d.home, l) if st, err := os.Stat(newpath); err != nil { - for _, p := range d.AdditionalImageStores() { + for _, p := range d.getAllImageStores() { lower = path.Join(p, d.name, l) if st2, err2 := os.Stat(lower); err2 == nil { if !permsKnown { @@ -1659,21 +1645,27 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO optsList = append(optsList, "metacopy=on", "redirect_dir=on") } - if len(absLowers) == 0 { - absLowers = append(absLowers, path.Join(dir, "empty")) - } - // user namespace requires this to move a directory from lower to upper. 
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps) if err != nil { return "", err } + if len(absLowers) == 0 { + absLowers = append(absLowers, path.Join(dir, "empty")) + } + if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil { - return "", err + if !inAdditionalStore { + return "", err + } + // if it is in an additional store, do not fail if the directory already exists + if _, err2 := os.Stat(diffDir); err2 != nil { + return "", err + } } - mergedDir := path.Join(workDirBase, "merged") + mergedDir := path.Join(dir, "merged") // Create the driver merged dir if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { return "", err @@ -1691,7 +1683,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } }() - workdir := path.Join(workDirBase, "work") + workdir := path.Join(dir, "work") if d.options.mountProgram == "" && unshare.IsRootless() { optsList = append(optsList, "userxattr") @@ -1841,7 +1833,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // Put unmounts the mount path created for the give id. func (d *Driver) Put(id string) error { - dir := d.dir(id) + dir, _, inAdditionalStore := d.dir2(id, false) if _, err := os.Stat(dir); err != nil { return err } @@ -1902,11 +1894,27 @@ func (d *Driver) Put(id string) error { } } - if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { - logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err) - return fmt.Errorf("removing mount point %q: %w", mountpoint, err) - } + if !inAdditionalStore { + uid, gid := int(0), int(0) + fi, err := os.Stat(mountpoint) + if err != nil { + return err + } + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + uid, gid = int(stat.Uid), int(stat.Gid) + } + tmpMountpoint := path.Join(dir, "merged.1") + if err := idtools.MkdirAs(tmpMountpoint, 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) { + return err + } + // rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains + // its atomic semantic. In this way the "merged" directory is never removed. + if err := unix.Rename(tmpMountpoint, mountpoint); err != nil { + logrus.Debugf("Failed to replace mountpoint %s overlay: %s - %v", id, mountpoint, err) + return fmt.Errorf("replacing mount point %q: %w", mountpoint, err) + } + } return nil } @@ -1994,14 +2002,18 @@ func (g *overlayFileGetter) Close() error { return nil } -func (d *Driver) getStagingDir() string { - return filepath.Join(d.home, stagingDir) +func (d *Driver) getStagingDir(id string) string { + _, homedir, _ := d.dir2(id, d.imageStore != "") + return filepath.Join(homedir, stagingDir) } // DiffGetter returns a FileGetCloser that can read files from the directory that // contains files for the layer differences, either for this layer, or one of our // lowers if we're just a template directory. Used for direct access for tar-split. 
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + if d.usingComposefs { + return nil, nil + } p, err := d.getDiffPath(id) if err != nil { return nil, err @@ -2018,9 +2030,9 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error { return os.RemoveAll(stagingDirectory) } -func (d *Driver) supportsDataOnlyLayers() (bool, error) { +func supportsDataOnlyLayersCached(home, runhome string) (bool, error) { feature := "dataonly-layers" - overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(d.runhome, feature) + overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) if err == nil { if overlayCacheResult { logrus.Debugf("Cached value indicated that data-only layers for overlay are supported") @@ -2029,27 +2041,15 @@ func (d *Driver) supportsDataOnlyLayers() (bool, error) { logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported") return false, errors.New(overlayCacheText) } - supportsDataOnly, err := supportsDataOnlyLayers(d.home) - if err2 := cachedFeatureRecord(d.runhome, feature, supportsDataOnly, ""); err2 != nil { + supportsDataOnly, err := supportsDataOnlyLayers(home) + if err2 := cachedFeatureRecord(runhome, feature, supportsDataOnly, ""); err2 != nil { return false, fmt.Errorf("recording overlay data-only layers support status: %w", err2) } return supportsDataOnly, err } -func (d *Driver) useComposeFs() bool { - if !composeFsSupported() || unshare.IsRootless() { - return false - } - supportsDataOnlyLayers, err := d.supportsDataOnlyLayers() - if err != nil { - logrus.Debugf("Check for data-only layers failed with: %v", err) - return false - } - return supportsDataOnlyLayers -} - // ApplyDiff applies the changes in the new layer using the specified function -func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) { +func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) { var idMappings *idtools.IDMappings if options != nil { idMappings = options.Mappings @@ -2061,15 +2061,22 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App var applyDir string if id == "" { - err := os.MkdirAll(d.getStagingDir(), 0o700) + stagingDir := d.getStagingDir(id) + err := os.MkdirAll(stagingDir, 0o700) if err != nil && !os.IsExist(err) { return graphdriver.DriverWithDifferOutput{}, err } - applyDir, err = os.MkdirTemp(d.getStagingDir(), "") + applyDir, err = os.MkdirTemp(stagingDir, "") if err != nil { return graphdriver.DriverWithDifferOutput{}, err } - + perms := defaultPerms + if d.options.forceMask != nil { + perms = *d.options.forceMask + } + if err := os.Chmod(applyDir, perms); err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } } else { var err error applyDir, err = d.getDiffPath(id) @@ -2083,8 +2090,9 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App differOptions := graphdriver.DifferOptions{ Format: graphdriver.DifferOutputFormatDir, } - if d.useComposeFs() { + if d.usingComposefs { differOptions.Format = graphdriver.DifferOutputFormatFlat + differOptions.UseFsVerity = graphdriver.DifferFsVerityEnabled } out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{ UIDMaps: idMappings.UIDs(), @@ -2100,33 +2108,42 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent 
string, options *graphdriver.App } // ApplyDiffFromStagingDirectory applies the changes using the specified staging directory. -func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffOpts) error { - if filepath.Dir(stagingDirectory) != d.getStagingDir() { +func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error { + stagingDirectory := diffOutput.Target + if filepath.Dir(stagingDirectory) != d.getStagingDir(id) { return fmt.Errorf("%q is not a staging directory", stagingDirectory) } + diffPath, err := d.getDiffPath(id) + if err != nil { + return err + } - if d.useComposeFs() { - // FIXME: move this logic into the differ so we don't have to open - // the file twice. - verityDigests, err := enableVerityRecursive(stagingDirectory) - if err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { - logrus.Warningf("%s", err) + // If the current layer doesn't set the mode for the parent, override it with the parent layer's mode. + if d.options.forceMask == nil && diffOutput.RootDirMode == nil && parent != "" { + parentDiffPath, err := d.getDiffPath(parent) + if err != nil { + return err + } + parentSt, err := os.Stat(parentDiffPath) + if err != nil { + return err } + if err := os.Chmod(stagingDirectory, parentSt.Mode()); err != nil { + return err + } + } + + if d.usingComposefs { toc := diffOutput.Artifacts[tocArtifact] + verityDigests := diffOutput.Artifacts[fsVerityDigestsArtifact].(map[string]string) if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil { return err } } - diffPath, err := d.getDiffPath(id) - if err != nil { - return err - } if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) { return err } - diffOutput.UncompressedDigest = diffOutput.TOCDigest - return os.Rename(stagingDirectory, diffPath) } @@ -2179,12 +2196,8 @@ func (d *Driver) getComposefsData(id string) string { } func (d *Driver) getDiffPath(id string) (string, error) { - dir, imagestore, _ := d.dir2(id) - base := dir - if imagestore != "" { - base = imagestore - } - return redirectDiffIfAdditionalLayer(path.Join(base, "diff")) + dir := d.dir(id) + return redirectDiffIfAdditionalLayer(path.Join(dir, "diff")) } func (d *Driver) getLowerDiffPaths(id string) ([]string, error) { @@ -2275,12 +2288,8 @@ func (d *Driver) AdditionalImageStores() []string { // by toContainer to those specified by toHost. 
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { var err error - dir, imagestore, _ := d.dir2(id) - base := dir - if imagestore != "" { - base = imagestore - } - diffDir := filepath.Join(base, "diff") + dir := d.dir(id) + diffDir := filepath.Join(dir, "diff") rootUID, rootGID := 0, 0 if toHost != nil { diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go index 2a7a307a2..d4f540c9c 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go @@ -4,6 +4,7 @@ package overlay import ( + "fmt" "path" "github.com/containers/storage/pkg/directory" @@ -15,3 +16,15 @@ import ( func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { return directory.Usage(path.Join(d.dir(id), "diff")) } + +func getComposeFsHelper() (string, error) { + return "", fmt.Errorf("composefs not supported on this build") +} + +func mountComposefsBlob(dataDir, mountPoint string) error { + return fmt.Errorf("composefs not supported on this build") +} + +func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error { + return fmt.Errorf("composefs not supported on this build") +} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go index 2be79698d..081fb2b10 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -1,453 +1,5 @@ -//go:build linux && !exclude_disk_quota && cgo -// +build linux,!exclude_disk_quota,cgo - -// -// projectquota.go - implements XFS project quota controls -// for setting quota limits on a newly created directory. -// It currently supports the legacy XFS specific ioctls. -// -// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR -// for both xfs/ext4 for kernel version >= v4.5 -// - package quota -/* -#include -#include -#include -#include -#include - -#ifndef FS_XFLAG_PROJINHERIT -struct fsxattr { - __u32 fsx_xflags; - __u32 fsx_extsize; - __u32 fsx_nextents; - __u32 fsx_projid; - unsigned char fsx_pad[12]; -}; -#define FS_XFLAG_PROJINHERIT 0x00000200 -#endif -#ifndef FS_IOC_FSGETXATTR -#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) -#endif -#ifndef FS_IOC_FSSETXATTR -#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) -#endif - -#ifndef PRJQUOTA -#define PRJQUOTA 2 -#endif -#ifndef FS_PROJ_QUOTA -#define FS_PROJ_QUOTA 2 -#endif -#ifndef Q_XSETPQLIM -#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) -#endif -#ifndef Q_XGETPQUOTA -#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) -#endif -*/ -import "C" - -import ( - "errors" - "fmt" - "math" - "os" - "path" - "path/filepath" - "sync" - "syscall" - "unsafe" - - "github.com/containers/storage/pkg/directory" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const projectIDsAllocatedPerQuotaHome = 10000 - // BackingFsBlockDeviceLink is the name of a file that we place in // the home directory of a driver that uses this package. const BackingFsBlockDeviceLink = "backingFsBlockDev" - -// Quota limit params - currently we only control blocks hard limit and inodes -type Quota struct { - Size uint64 - Inodes uint64 -} - -// Control - Context to be used by storage driver (e.g. 
overlay) -// who wants to apply project quotas to container dirs -type Control struct { - backingFsBlockDev string - nextProjectID uint32 - quotas *sync.Map - basePath string -} - -// Attempt to generate a unigue projectid. Multiple directories -// per file system can have quota and they need a group of unique -// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000) -// unique projectids, based on the inode of the basepath. -func generateUniqueProjectID(path string) (uint32, error) { - fileinfo, err := os.Stat(path) - if err != nil { - return 0, err - } - stat, ok := fileinfo.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("not a syscall.Stat_t %s", path) - } - projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome) - return uint32(projectID), nil -} - -// NewControl - initialize project quota support. -// Test to make sure that quota can be set on a test dir and find -// the first project id to be used for the next container create. -// -// Returns nil (and error) if project quota is not supported. -// -// First get the project id of the basePath directory. -// This test will fail if the backing fs is not xfs. -// -// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: -// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects -// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects -// echo storage:100000 >> /etc/projid -// echo volumes:200000 >> /etc/projid -// xfs_quota -x -c 'project -s storage volumes' / -// -// In the example above, the storage directory project id will be used as a -// "start offset" and all containers will be assigned larger project ids -// (e.g. >= 100000). Then the volumes directory project id will be used as a -// "start offset" and all volumes will be assigned larger project ids -// (e.g. >= 200000). -// This is a way to prevent xfs_quota management from conflicting with -// containers/storage. - -// Then try to create a test directory with the next project id and set a quota -// on it. If that works, continue to scan existing containers to map allocated -// project ids. 
-func NewControl(basePath string) (*Control, error) { - // - // Get project id of parent dir as minimal id to be used by driver - // - minProjectID, err := getProjectID(basePath) - if err != nil { - return nil, err - } - if minProjectID == 0 { - // Indicates the storage was never initialized - // Generate a unique range of Projectids for this basepath - minProjectID, err = generateUniqueProjectID(basePath) - if err != nil { - return nil, err - } - - } - // - // create backing filesystem device node - // - backingFsBlockDev, err := makeBackingFsDev(basePath) - if err != nil { - return nil, err - } - - // - // Test if filesystem supports project quotas by trying to set - // a quota on the first available project id - // - quota := Quota{ - Size: 0, - Inodes: 0, - } - - q := Control{ - backingFsBlockDev: backingFsBlockDev, - nextProjectID: minProjectID + 1, - quotas: &sync.Map{}, - basePath: basePath, - } - - if err := q.setProjectQuota(minProjectID, quota); err != nil { - return nil, err - } - - // - // get first project id to be used for next container - // - err = q.findNextProjectID() - if err != nil { - return nil, err - } - - logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) - return &q, nil -} - -// SetQuota - assign a unique project id to directory and set the quota limits -// for that project id -func (q *Control) SetQuota(targetPath string, quota Quota) error { - var projectID uint32 - value, ok := q.quotas.Load(targetPath) - if ok { - projectID, ok = value.(uint32) - } - if !ok { - projectID = q.nextProjectID - - // - // assign project id to new container directory - // - err := setProjectID(targetPath, projectID) - if err != nil { - return err - } - - q.quotas.Store(targetPath, projectID) - q.nextProjectID++ - } - - // - // set the quota limit for the container's project id - // - logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) - return q.setProjectQuota(projectID, quota) -} - -// ClearQuota removes the map entry in the quotas map for targetPath. -// It does so to prevent the map leaking entries as directories are deleted. -func (q *Control) ClearQuota(targetPath string) { - q.quotas.Delete(targetPath) -} - -// setProjectQuota - set the quota for project id on xfs block device -func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { - var d C.fs_disk_quota_t - d.d_version = C.FS_DQUOT_VERSION - d.d_id = C.__u32(projectID) - d.d_flags = C.FS_PROJ_QUOTA - - if quota.Size > 0 { - d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT - d.d_blk_hardlimit = C.__u64(quota.Size / 512) - d.d_blk_softlimit = d.d_blk_hardlimit - } - if quota.Inodes > 0 { - d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT - d.d_ino_hardlimit = C.__u64(quota.Inodes) - d.d_ino_softlimit = d.d_ino_hardlimit - } - - cs := C.CString(q.backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - runQuotactl := func() syscall.Errno { - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, - uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), - uintptr(unsafe.Pointer(&d)), 0, 0) - return errno - } - - errno := runQuotactl() - - // If the backingFsBlockDev does not exist any more then try to recreate it. 
- if errors.Is(errno, unix.ENOENT) { - if _, err := makeBackingFsDev(q.basePath); err != nil { - return fmt.Errorf( - "failed to recreate missing backingFsBlockDev %s for projid %d: %w", - q.backingFsBlockDev, projectID, err, - ) - } - - if errno := runQuotactl(); errno != 0 { - return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w", - projectID, q.backingFsBlockDev, errno) - } - - } else if errno != 0 { - return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", - projectID, q.backingFsBlockDev, errno) - } - - return nil -} - -// GetQuota - get the quota limits of a directory that was configured with SetQuota -func (q *Control) GetQuota(targetPath string, quota *Quota) error { - d, err := q.fsDiskQuotaFromPath(targetPath) - if err != nil { - return err - } - quota.Size = uint64(d.d_blk_hardlimit) * 512 - quota.Inodes = uint64(d.d_ino_hardlimit) - return nil -} - -// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota -func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error { - d, err := q.fsDiskQuotaFromPath(targetPath) - if err != nil { - return err - } - usage.Size = int64(d.d_bcount) * 512 - usage.InodeCount = int64(d.d_icount) - - return nil -} - -func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) { - var d C.fs_disk_quota_t - var projectID uint32 - value, ok := q.quotas.Load(targetPath) - if ok { - projectID, ok = value.(uint32) - } - if !ok { - return d, fmt.Errorf("quota not found for path : %s", targetPath) - } - - // - // get the quota limit for the container's project id - // - cs := C.CString(q.backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, - uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { - return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w", - projectID, q.backingFsBlockDev, errno) - } - - return d, nil -} - -// getProjectID - get the project id of path on xfs -func getProjectID(targetPath string) (uint32, error) { - dir, err := openDir(targetPath) - if err != nil { - return 0, err - } - defer closeDir(dir) - - var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) - } - - return uint32(fsx.fsx_projid), nil -} - -// setProjectID - set the project id of path on xfs -func setProjectID(targetPath string, projectID uint32) error { - dir, err := openDir(targetPath) - if err != nil { - return err - } - defer closeDir(dir) - - var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) - } - fsx.fsx_projid = C.__u32(projectID) - fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT - _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno) - } - - return nil -} - -// findNextProjectID - find the next project id to be used for containers -// by scanning driver home directory to find used project ids -func (q *Control) findNextProjectID() error { - files, err := os.ReadDir(q.basePath) - if 
err != nil { - return fmt.Errorf("read directory failed : %s", q.basePath) - } - for _, file := range files { - if !file.IsDir() { - continue - } - path := filepath.Join(q.basePath, file.Name()) - projid, err := getProjectID(path) - if err != nil { - return err - } - if projid > 0 { - q.quotas.Store(path, projid) - } - if q.nextProjectID <= projid { - q.nextProjectID = projid + 1 - } - } - - return nil -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir, errno := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno) - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -// Get the backing block device of the driver home directory -// and create a block device node under the home directory -// to be used by quotactl commands -func makeBackingFsDev(home string) (string, error) { - var stat unix.Stat_t - if err := unix.Stat(home, &stat); err != nil { - return "", err - } - - backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink) - backingFsBlockDevTmp := backingFsBlockDev + ".tmp" - // Re-create just in case someone copied the home directory over to a new device - if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { - if !errors.Is(err, unix.EEXIST) { - return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) - } - // On EEXIST, try again after unlinking any potential leftover. - _ = unix.Unlink(backingFsBlockDevTmp) - if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { - return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) - } - } - if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { - return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) - } - - return backingFsBlockDev, nil -} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go new file mode 100644 index 000000000..b0623bdac --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go @@ -0,0 +1,449 @@ +//go:build linux && !exclude_disk_quota && cgo +// +build linux,!exclude_disk_quota,cgo + +// +// projectquota.go - implements XFS project quota controls +// for setting quota limits on a newly created directory. +// It currently supports the legacy XFS specific ioctls. 
+//
+// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
+// for both xfs/ext4 for kernel version >= v4.5
+//
+
+package quota
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
+
+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+	__u32 fsx_xflags;
+	__u32 fsx_extsize;
+	__u32 fsx_nextents;
+	__u32 fsx_projid;
+	unsigned char fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT 0x00000200
+#endif
+#ifndef FS_IOC_FSGETXATTR
+#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
+#endif
+#ifndef FS_IOC_FSSETXATTR
+#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
+#endif
+
+#ifndef PRJQUOTA
+#define PRJQUOTA 2
+#endif
+#ifndef FS_PROJ_QUOTA
+#define FS_PROJ_QUOTA 2
+#endif
+#ifndef Q_XSETPQLIM
+#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
+#endif
+#ifndef Q_XGETPQUOTA
+#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
+#endif
+*/
+import "C"
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"os"
+	"path"
+	"path/filepath"
+	"sync"
+	"syscall"
+	"unsafe"
+
+	"github.com/containers/storage/pkg/directory"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+const projectIDsAllocatedPerQuotaHome = 10000
+
+// Quota limit params - currently we only control blocks hard limit and inodes
+type Quota struct {
+	Size   uint64
+	Inodes uint64
+}
+
+// Control - Context to be used by storage driver (e.g. overlay)
+// who wants to apply project quotas to container dirs
+type Control struct {
+	backingFsBlockDev string
+	nextProjectID     uint32
+	quotas            *sync.Map
+	basePath          string
+}
+
+// Attempt to generate a unique projectid. Multiple directories
+// per file system can have quota and they need a group of unique
+// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000)
+// unique projectids, based on the inode of the basepath.
+func generateUniqueProjectID(path string) (uint32, error) {
+	fileinfo, err := os.Stat(path)
+	if err != nil {
+		return 0, err
+	}
+	stat, ok := fileinfo.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
+	}
+	projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
+	return uint32(projectID), nil
+}
+
+// NewControl - initialize project quota support.
+// Test to make sure that quota can be set on a test dir and find
+// the first project id to be used for the next container create.
+//
+// Returns nil (and an error) if project quota is not supported.
+//
+// First get the project id of the basePath directory.
+// This test will fail if the backing fs is not xfs.
+//
+// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
+// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects
+// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects
+// echo storage:100000 >> /etc/projid
+// echo volumes:200000 >> /etc/projid
+// xfs_quota -x -c 'project -s storage volumes' /
+//
+// In the example above, the storage directory project id will be used as a
+// "start offset" and all containers will be assigned larger project ids
+// (e.g. >= 100000). Then the volumes directory project id will be used as a
+// "start offset" and all volumes will be assigned larger project ids
+// (e.g. >= 200000).
+// This is a way to prevent xfs_quota management from conflicting with
+// containers/storage.
+
+// Then try to create a test directory with the next project id and set a quota
+// on it. If that works, continue to scan existing containers to map allocated
+// project ids.
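A hedged usage sketch of the quota Control API defined here, assuming Linux, cgo, and a driver home on XFS mounted with project quotas enabled; the paths and limits are illustrative only:

package main

import (
	"fmt"

	"github.com/containers/storage/drivers/quota"
	"github.com/containers/storage/pkg/directory"
)

func main() {
	// NewControl reads (or allocates) a starting project id for basePath,
	// creates the backingFsBlockDev node, and verifies that a quota can
	// actually be set; it fails on filesystems without project quotas.
	ctl, err := quota.NewControl("/var/lib/containers/storage/overlay")
	if err != nil {
		fmt.Println("project quotas unavailable:", err)
		return
	}

	// target must be an existing directory on the same filesystem.
	// A zero Size or Inodes field leaves that limit unset.
	target := "/var/lib/containers/storage/overlay/example-layer"
	if err := ctl.SetQuota(target, quota.Quota{Size: 1 << 30, Inodes: 100000}); err != nil {
		fmt.Println("SetQuota:", err)
		return
	}

	// Usage comes from the quota subsystem itself, which is far cheaper
	// than walking the directory tree.
	var usage directory.DiskUsage
	if err := ctl.GetDiskUsage(target, &usage); err == nil {
		fmt.Printf("used %d bytes, %d inodes\n", usage.Size, usage.InodeCount)
	}
}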
+func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + if minProjectID == 0 { + // Indicates the storage was never initialized + // Generate a unique range of Projectids for this basepath + minProjectID, err = generateUniqueProjectID(basePath) + if err != nil { + return nil, err + } + + } + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + Inodes: 0, + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: &sync.Map{}, + basePath: basePath, + } + + if err := q.setProjectQuota(minProjectID, quota); err != nil { + return nil, err + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID() + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + var projectID uint32 + value, ok := q.quotas.Load(targetPath) + if ok { + projectID, ok = value.(uint32) + } + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas.Store(targetPath, projectID) + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID) + return q.setProjectQuota(projectID, quota) +} + +// ClearQuota removes the map entry in the quotas map for targetPath. +// It does so to prevent the map leaking entries as directories are deleted. +func (q *Control) ClearQuota(targetPath string) { + q.quotas.Delete(targetPath) +} + +// setProjectQuota - set the quota for project id on xfs block device +func (q *Control) setProjectQuota(projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.FS_PROJ_QUOTA + + if quota.Size > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + } + if quota.Inodes > 0 { + d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT + d.d_ino_hardlimit = C.__u64(quota.Inodes) + d.d_ino_softlimit = d.d_ino_hardlimit + } + + cs := C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + runQuotactl := func() syscall.Errno { + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + return errno + } + + errno := runQuotactl() + + // If the backingFsBlockDev does not exist any more then try to recreate it. 
+ if errors.Is(errno, unix.ENOENT) { + if _, err := makeBackingFsDev(q.basePath); err != nil { + return fmt.Errorf( + "failed to recreate missing backingFsBlockDev %s for projid %d: %w", + q.backingFsBlockDev, projectID, err, + ) + } + + if errno := runQuotactl(); errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w", + projectID, q.backingFsBlockDev, errno) + } + + } else if errno != 0 { + return fmt.Errorf("failed to set quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + d, err := q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + quota.Inodes = uint64(d.d_ino_hardlimit) + return nil +} + +// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota +func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error { + d, err := q.fsDiskQuotaFromPath(targetPath) + if err != nil { + return err + } + usage.Size = int64(d.d_bcount) * 512 + usage.InodeCount = int64(d.d_icount) + + return nil +} + +func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) { + var d C.fs_disk_quota_t + var projectID uint32 + value, ok := q.quotas.Load(targetPath) + if ok { + projectID, ok = value.(uint32) + } + if !ok { + return d, fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + cs := C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w", + projectID, q.backingFsBlockDev, errno) + } + + return d, nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID() error { + files, err := os.ReadDir(q.basePath) + if 
err != nil { + return fmt.Errorf("read directory failed : %s", q.basePath) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(q.basePath, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas.Store(path, projid) + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir, errno := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno) + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + var stat unix.Stat_t + if err := unix.Stat(home, &stat); err != nil { + return "", err + } + + backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink) + backingFsBlockDevTmp := backingFsBlockDev + ".tmp" + // Re-create just in case someone copied the home directory over to a new device + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + if !errors.Is(err, unix.EEXIST) { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + // On EEXIST, try again after unlinking any potential leftover. + _ = unix.Unlink(backingFsBlockDevTmp) + if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err) + } + } + if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil { + return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index 599cf095d..9b552254b 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -31,8 +31,9 @@ func init() { func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { d := &Driver{ name: "vfs", - homes: []string{home}, + home: home, idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + imageStore: options.ImageStore, } rootIDs := d.idMappings.RootPair() @@ -47,7 +48,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) key = strings.ToLower(key) switch key { case "vfs.imagestore", ".imagestore": - d.homes = append(d.homes, strings.Split(val, ",")...) + d.additionalHomes = append(d.additionalHomes, strings.Split(val, ",")...) continue case "vfs.mountopt": return nil, fmt.Errorf("vfs driver does not support mount options") @@ -62,12 +63,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) return nil, fmt.Errorf("vfs driver does not support %s options", key) } } - // If --imagestore is provided, lets add writable graphRoot - // to vfs's additional image store, as it is done for - // `overlay` driver. 
- if options.ImageStore != "" { - d.homes = append(d.homes, options.ImageStore) - } + d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d) d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater) @@ -80,11 +76,13 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver type Driver struct { name string - homes []string + home string + additionalHomes []string idMappings *idtools.IDMappings ignoreChownErrors bool naiveDiff graphdriver.DiffDriver updater graphdriver.LayerIDMapUpdater + imageStore string } func (d *Driver) String() string { @@ -158,7 +156,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool idMappings = opts.IDMappings } - dir := d.dir(id) + dir := d.dir2(id, ro) rootIDs := idMappings.RootPair() if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil { return err @@ -204,18 +202,32 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool return nil } -func (d *Driver) dir(id string) string { - for i, home := range d.homes { - if i > 0 { - home = filepath.Join(home, d.String()) +func (d *Driver) dir2(id string, useImageStore bool) string { + var homedir string + + if useImageStore && d.imageStore != "" { + homedir = filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id)) + } else { + homedir = filepath.Join(d.home, "dir", filepath.Base(id)) + } + if _, err := os.Stat(homedir); err != nil { + additionalHomes := d.additionalHomes[:] + if d.imageStore != "" { + additionalHomes = append(additionalHomes, d.imageStore) } - candidate := filepath.Join(home, "dir", filepath.Base(id)) - fi, err := os.Stat(candidate) - if err == nil && fi.IsDir() { - return candidate + for _, home := range additionalHomes { + candidate := filepath.Join(home, d.String(), "dir", filepath.Base(id)) + fi, err := os.Stat(candidate) + if err == nil && fi.IsDir() { + return candidate + } } } - return filepath.Join(d.homes[0], "dir", filepath.Base(id)) + return homedir +} + +func (d *Driver) dir(id string) string { + return d.dir2(id, false) } // Remove deletes the content from the directory for a given id. @@ -263,7 +275,7 @@ func (d *Driver) Exists(id string) bool { // List layers (not including additional image stores) func (d *Driver) ListLayers() ([]string, error) { - entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir")) + entries, err := os.ReadDir(filepath.Join(d.home, "dir")) if err != nil { return nil, err } @@ -285,8 +297,8 @@ func (d *Driver) ListLayers() ([]string, error) { // AdditionalImageStores returns additional image stores supported by the driver func (d *Driver) AdditionalImageStores() []string { - if len(d.homes) > 1 { - return d.homes[1:] + if len(d.additionalHomes) > 0 { + return d.additionalHomes } return nil } diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index d105e73f6..f1325262b 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -126,6 +126,13 @@ type Layer struct { // as a DiffID. UncompressedDigest digest.Digest `json:"diff-digest,omitempty"` + // TOCDigest represents the digest of the Table of Contents (TOC) of the blob. + // This digest is utilized when the UncompressedDigest is not + // validated during the partial image pull process, but the + // TOC itself is validated. + // It serves as an alternative reference under these specific conditions. 
+	TOCDigest digest.Digest `json:"toc-digest,omitempty"`
+
 	// UncompressedSize is the length of the blob that was last passed to
 	// ApplyDiff() or create(), after we decompressed it. If
 	// UncompressedDigest is not set, this should be treated as if it were
@@ -174,6 +181,13 @@ type DiffOptions struct {
 	Compression *archive.Compression
 }
 
+// stagedLayerOptions are the options passed to .create to populate a staged
+// layer
+type stagedLayerOptions struct {
+	DiffOutput  *drivers.DriverWithDifferOutput
+	DiffOptions *drivers.ApplyDiffWithDifferOpts
+}
+
 // roLayerStore wraps a graph driver, adding the ability to refer to layers by
 // name, and keeping track of parent-child relationships, along with a list of
 // all known layers.
@@ -228,6 +242,10 @@ type roLayerStore interface {
 	// specified uncompressed digest value recorded for them.
 	LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
 
+	// LayersByTOCDigest returns a slice of the layers with the
+	// specified TOC digest value recorded for them.
+	LayersByTOCDigest(d digest.Digest) ([]Layer, error)
+
 	// Layers returns a slice of the known layers.
 	Layers() ([]Layer, error)
 }
@@ -256,7 +274,7 @@ type rwLayerStore interface {
 	// underlying drivers do not themselves distinguish between writeable
 	// and read-only layers. Returns the new layer structure and the size of the
 	// diff which was applied to its parent to initialize its contents.
-	create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error)
+	create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error)
 
 	// updateNames modifies names associated with a layer based on (op, names).
 	updateNames(id string, names []string, op updateNameOperation) error
@@ -296,13 +314,13 @@ type rwLayerStore interface {
 	// ApplyDiffWithDiffer applies the changes through the differ callback function.
 	// If to is the empty string, then a staging directory is created by the driver.
-	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
 
 	// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
 	CleanupStagingDirectory(stagingDirectory string) error
 
-	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
-	ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
+	// applyDiffFromStagingDirectory uses diffOutput.Target to create the diff.
+	applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
 
 	// DifferTarget gets the location where files are stored for the layer.
DifferTarget(id string) (string, error) @@ -316,10 +334,71 @@ type rwLayerStore interface { GarbageCollect() error } +type multipleLockFile struct { + lockfiles []*lockfile.LockFile +} + +func (l multipleLockFile) Lock() { + for _, lock := range l.lockfiles { + lock.Lock() + } +} + +func (l multipleLockFile) RLock() { + for _, lock := range l.lockfiles { + lock.RLock() + } +} + +func (l multipleLockFile) Unlock() { + for _, lock := range l.lockfiles { + lock.Unlock() + } +} + +func (l multipleLockFile) ModifiedSince(lastWrite lockfile.LastWrite) (lockfile.LastWrite, bool, error) { + // Look up only the first lockfile, since this is the value returned by RecordWrite(). + return l.lockfiles[0].ModifiedSince(lastWrite) +} + +func (l multipleLockFile) AssertLockedForWriting() { + for _, lock := range l.lockfiles { + lock.AssertLockedForWriting() + } +} + +func (l multipleLockFile) GetLastWrite() (lockfile.LastWrite, error) { + return l.lockfiles[0].GetLastWrite() +} + +func (l multipleLockFile) RecordWrite() (lockfile.LastWrite, error) { + var lastWrite *lockfile.LastWrite + for _, lock := range l.lockfiles { + lw, err := lock.RecordWrite() + if err != nil { + return lw, err + } + // Return the first value we get so we know that + // all the locks have a write time >= to this one. + if lastWrite == nil { + lastWrite = &lw + } + } + return *lastWrite, nil +} + +func (l multipleLockFile) IsReadWrite() bool { + return l.lockfiles[0].IsReadWrite() +} + +func newMultipleLockFile(l ...*lockfile.LockFile) *multipleLockFile { + return &multipleLockFile{lockfiles: l} +} + type layerStore struct { // The following fields are only set when constructing layerStore, and must never be modified afterwards. // They are safe to access without any other locking. - lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores. + lockfile *multipleLockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores. mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held. rundir string jsonPath [numLayerLocationIndex]string @@ -337,6 +416,7 @@ type layerStore struct { bymount map[string]*Layer bycompressedsum map[digest.Digest][]string byuncompressedsum map[digest.Digest][]string + bytocsum map[digest.Digest][]string layerspathsModified [numLayerLocationIndex]time.Time // FIXME: This field is only set when constructing layerStore, but locking rules of the driver @@ -366,6 +446,7 @@ func copyLayer(l *Layer) *Layer { CompressedSize: l.CompressedSize, UncompressedDigest: l.UncompressedDigest, UncompressedSize: l.UncompressedSize, + TOCDigest: l.TOCDigest, CompressionType: l.CompressionType, ReadOnly: l.ReadOnly, volatileStore: l.volatileStore, @@ -745,6 +826,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { names := make(map[string]*Layer) compressedsums := make(map[digest.Digest][]string) uncompressedsums := make(map[digest.Digest][]string) + tocsums := make(map[digest.Digest][]string) var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them. 
if r.lockfile.IsReadWrite() { selinux.ClearLabels() @@ -765,6 +847,9 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { if layer.UncompressedDigest != "" { uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID) } + if layer.TOCDigest != "" { + tocsums[layer.TOCDigest] = append(tocsums[layer.TOCDigest], layer.ID) + } if layer.MountLabel != "" { selinux.ReserveLabel(layer.MountLabel) } @@ -792,6 +877,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) { r.byname = names r.bycompressedsum = compressedsums r.byuncompressedsum = uncompressedsums + r.bytocsum = tocsums // Load and merge information about which layers are mounted, and where. if r.lockfile.IsReadWrite() { @@ -998,22 +1084,37 @@ func (r *layerStore) saveMounts() error { return r.loadMounts() } -func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) { +func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.Driver, transient bool) (rwLayerStore, error) { if err := os.MkdirAll(rundir, 0o700); err != nil { return nil, err } if err := os.MkdirAll(layerdir, 0o700); err != nil { return nil, err } + if imagedir != "" { + if err := os.MkdirAll(imagedir, 0o700); err != nil { + return nil, err + } + } // Note: While the containers.lock file is in rundir for transient stores // we don't want to do this here, because the non-transient layers in // layers.json might be used externally as a read-only layer (using e.g. // additionalimagestores), and that would look for the lockfile in the // same directory + var lockFiles []*lockfile.LockFile lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock")) if err != nil { return nil, err } + lockFiles = append(lockFiles, lockFile) + if imagedir != "" { + lockFile, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock")) + if err != nil { + return nil, err + } + lockFiles = append(lockFiles, lockFile) + } + mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock")) if err != nil { return nil, err @@ -1023,7 +1124,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri volatileDir = rundir } rlstore := layerStore{ - lockfile: lockFile, + lockfile: newMultipleLockFile(lockFiles...), mountsLockfile: mountsLockfile, rundir: rundir, jsonPath: [numLayerLocationIndex]string{ @@ -1060,7 +1161,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL return nil, err } rlstore := layerStore{ - lockfile: lockfile, + lockfile: newMultipleLockFile(lockfile), mountsLockfile: nil, rundir: rundir, jsonPath: [numLayerLocationIndex]string{ @@ -1112,7 +1213,7 @@ func (r *layerStore) Size(name string) (int64, error) { // We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that // a zero value is not just present because it was never set to anything else (which can happen if the layer was // created by a version of this library that didn't keep track of digest and size information). 
- if layer.UncompressedDigest != "" { + if layer.TOCDigest != "" || layer.UncompressedDigest != "" { return layer.UncompressedSize, nil } return -1, nil @@ -1201,6 +1302,9 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s if layer.UncompressedDigest != "" { r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) } + if layer.TOCDigest != "" { + r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID) + } if err := r.saveFor(layer); err != nil { if e := r.Delete(layer.ID); e != nil { logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e) @@ -1211,7 +1315,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s } // Requires startWriting. -func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) { +func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) { if moreOptions == nil { moreOptions = &LayerOptions{} } @@ -1251,6 +1355,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount templateCompressedDigest digest.Digest templateCompressedSize int64 templateUncompressedDigest digest.Digest + templateTOCDigest digest.Digest templateUncompressedSize int64 templateCompressionType archive.Compression templateUIDs, templateGIDs []uint32 @@ -1263,6 +1368,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount } templateMetadata = templateLayer.Metadata templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap) + templateTOCDigest = templateLayer.TOCDigest templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize templateCompressionType = templateLayer.CompressionType @@ -1291,6 +1397,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount CompressedDigest: templateCompressedDigest, CompressedSize: templateCompressedSize, UncompressedDigest: templateUncompressedDigest, + TOCDigest: templateTOCDigest, UncompressedSize: templateUncompressedSize, CompressionType: templateCompressionType, UIDs: templateUIDs, @@ -1402,6 +1509,11 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount cleanupFailureContext = "applying layer diff" return nil, -1, err } + } else if slo != nil { + if err := r.applyDiffFromStagingDirectory(layer.ID, slo.DiffOutput, slo.DiffOptions); err != nil { + cleanupFailureContext = "applying staged directory diff" + return nil, -1, err + } } else { // applyDiffWithOptions() would have updated r.bycompressedsum // and r.byuncompressedsum for us, but if we used a template @@ -1413,6 +1525,9 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount if layer.UncompressedDigest != "" { r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID) } + if layer.TOCDigest != "" { + r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID) + } } 
delete(layer.Flags, incompleteFlag) @@ -2007,9 +2122,16 @@ func (s *simpleGetCloser) Close() error { // LOCKING BUG: See the comments in layerStore.Diff func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { if getter, ok := r.driver.(drivers.DiffGetterDriver); ok { - return getter.DiffGetter(id) + fgc, err := getter.DiffGetter(id) + if err != nil { + return nil, err + } + if fgc != nil { + return fgc, nil + } } - path, err := r.Mount(id, drivers.MountOpts{}) + + path, err := r.Mount(id, drivers.MountOpts{Options: []string{"ro"}}) if err != nil { return nil, err } @@ -2197,6 +2319,25 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) { return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel) } +func updateDigestMap(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { + var newList []string + if oldvalue != "" { + for _, value := range (*m)[oldvalue] { + if value != id { + newList = append(newList, value) + } + } + if len(newList) > 0 { + (*m)[oldvalue] = newList + } else { + delete(*m, oldvalue) + } + } + if newvalue != "" { + (*m)[newvalue] = append((*m)[newvalue], id) + } +} + // Requires startWriting. func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { return r.applyDiffWithOptions(to, nil, diff) @@ -2233,7 +2374,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, if layerOptions != nil && layerOptions.UncompressedDigest != "" && layerOptions.UncompressedDigest.Algorithm() == digest.Canonical { uncompressedDigest = layerOptions.UncompressedDigest - } else { + } else if compression != archive.Uncompressed { uncompressedDigester = digest.Canonical.Digester() } @@ -2312,28 +2453,17 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, if uncompressedDigester != nil { uncompressedDigest = uncompressedDigester.Digest() } - - updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { - var newList []string - if oldvalue != "" { - for _, value := range (*m)[oldvalue] { - if value != id { - newList = append(newList, value) - } - } - if len(newList) > 0 { - (*m)[oldvalue] = newList - } else { - delete(*m, oldvalue) - } - } - if newvalue != "" { - (*m)[newvalue] = append((*m)[newvalue], id) - } + if uncompressedDigest == "" && compression == archive.Uncompressed { + uncompressedDigest = compressedDigest } + updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID) layer.CompressedDigest = compressedDigest - layer.CompressedSize = compressedCounter.Count + if layerOptions != nil && layerOptions.OriginalDigest != "" && layerOptions.OriginalSize != nil { + layer.CompressedSize = *layerOptions.OriginalSize + } else { + layer.CompressedSize = compressedCounter.Count + } updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID) layer.UncompressedDigest = uncompressedDigest layer.UncompressedSize = uncompressedCounter.Count @@ -2372,7 +2502,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) { } // Requires startWriting. 
-func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { +func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { return ErrNotSupported @@ -2382,20 +2512,39 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, return ErrLayerUnknown } if options == nil { - options = &drivers.ApplyDiffOpts{ - Mappings: r.layerMappings(layer), - MountLabel: layer.MountLabel, + options = &drivers.ApplyDiffWithDifferOpts{ + ApplyDiffOpts: drivers.ApplyDiffOpts{ + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + }, + Flags: nil, } } - err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options) + + err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, diffOutput, options) if err != nil { return err } layer.UIDs = diffOutput.UIDs layer.GIDs = diffOutput.GIDs + updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, diffOutput.UncompressedDigest, layer.ID) layer.UncompressedDigest = diffOutput.UncompressedDigest + updateDigestMap(&r.bytocsum, diffOutput.TOCDigest, diffOutput.TOCDigest, layer.ID) + layer.TOCDigest = diffOutput.TOCDigest layer.UncompressedSize = diffOutput.Size layer.Metadata = diffOutput.Metadata + if options != nil && options.Flags != nil { + if layer.Flags == nil { + layer.Flags = make(map[string]interface{}) + } + for k, v := range options.Flags { + layer.Flags[k] = v + } + } + if err = r.saveFor(layer); err != nil { + return err + } + if len(diffOutput.TarSplit) != 0 { tsdata := bytes.Buffer{} compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed) @@ -2425,14 +2574,11 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, return err } } - if err = r.saveFor(layer); err != nil { - return err - } return err } // Requires startWriting. -func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { +func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { ddriver, ok := r.driver.(drivers.DriverWithDiffer) if !ok { return nil, ErrNotSupported @@ -2448,9 +2594,11 @@ func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOp return nil, ErrLayerUnknown } if options == nil { - options = &drivers.ApplyDiffOpts{ - Mappings: r.layerMappings(layer), - MountLabel: layer.MountLabel, + options = &drivers.ApplyDiffWithDifferOpts{ + ApplyDiffOpts: drivers.ApplyDiffOpts{ + Mappings: r.layerMappings(layer), + MountLabel: layer.MountLabel, + }, } } output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ) @@ -2494,6 +2642,11 @@ func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error return r.layersByDigestMap(r.byuncompressedsum, d) } +// Requires startReading or startWriting. 
+func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) { + return r.layersByDigestMap(r.bytocsum, d) +} + func closeAll(closes ...func() error) (rErr error) { for _, f := range closes { if err := f(); err != nil { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 05d257118..70f76d66d 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -339,12 +339,43 @@ func (compression *Compression) Extension() string { return "" } +// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to +// prevent tar.FileInfoHeader from introspecting it and potentially calling into +// glibc. +type nosysFileInfo struct { + os.FileInfo +} + +func (fi nosysFileInfo) Sys() interface{} { + // A Sys value of type *tar.Header is safe as it is system-independent. + // The tar.FileInfoHeader function copies the fields into the returned + // header without performing any OS lookups. + if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { + return sys + } + return nil +} + +// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi. +var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error + +func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + if sysStatOverride == nil { + return tar.FileInfoHeader(fi, link) + } + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + return hdr, sysStatOverride(fi, hdr) +} + // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. // Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), // which have been deleted since Go 1.9 archive/tar. func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) + hdr, err := fileInfoHeaderNoLookups(fi, link) if err != nil { return nil, err } @@ -385,7 +416,7 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error { return err } for _, key := range xattrs { - if strings.HasPrefix(key, "user.") { + if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") { value, err := system.Lgetxattr(path, key) if err != nil { if errors.Is(err, system.E2BIG) { @@ -477,7 +508,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { } } if fi.Mode()&os.ModeSocket != 0 { - logrus.Warnf("archive: skipping %q since it is a socket", path) + logrus.Infof("archive: skipping %q since it is a socket", path) return nil } @@ -534,6 +565,10 @@ func (ta *tarAppender) addTarFile(path, name string) error { if ta.ChownOpts != nil { hdr.Uid = ta.ChownOpts.UID hdr.Gid = ta.ChownOpts.GID + // Don’t expose the user names from the local system; they probably don’t match the ta.ChownOpts value anyway, + // and they unnecessarily give recipients of the tar file potentially private data. 
+ hdr.Uname = "" + hdr.Gname = "" } maybeTruncateHeaderModTime(hdr) diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go index 88192f220..c6811031f 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go @@ -15,6 +15,31 @@ import ( "golang.org/x/sys/unix" ) +func init() { + sysStatOverride = statUnix +} + +// statUnix populates hdr from system-dependent fields of fi without performing +// any OS lookups. +// Adapted from Moby. +func statUnix(fi os.FileInfo, hdr *tar.Header) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert + } + + return nil +} + // fixVolumePathPrefix does platform specific processing to ensure that if // the path being passed in is not in a volume path format, convert it to one. func fixVolumePathPrefix(srcPath string) string { diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 5d4befc23..1e3ad86d1 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -25,7 +25,7 @@ import ( const ( cacheKey = "chunked-manifest-cache" - cacheVersion = 1 + cacheVersion = 2 digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) @@ -207,9 +207,9 @@ func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) { return string(digester.Digest()), nil } -// generateFileLocation generates a file location in the form $OFFSET@$PATH -func generateFileLocation(path string, offset uint64) []byte { - return []byte(fmt.Sprintf("%d@%s", offset, path)) +// generateFileLocation generates a file location in the form $OFFSET:$LEN:$PATH +func generateFileLocation(path string, offset, len uint64) []byte { + return []byte(fmt.Sprintf("%d:%d:%s", offset, len, path)) } // generateTag generates a tag in the form $DIGEST$OFFSET@LEN. 
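The location-encoding change above is why `cacheVersion` was bumped from 1 to 2 earlier in this file: locations are now stored as `$OFFSET:$LEN:$PATH` instead of `$OFFSET@$PATH`, so caches written by older versions are regenerated rather than misparsed. A minimal round-trip sketch of the new encoding (helper names are hypothetical, not part of the patch; only the format string and the `SplitN(..., ":", 3)` parsing mirror the vendored code):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// encodeLocation mirrors generateFileLocation: "$OFFSET:$LEN:$PATH".
func encodeLocation(path string, offset, length uint64) string {
	return fmt.Sprintf("%d:%d:%s", offset, length, path)
}

// decodeLocation splits with a limit of 3, so any ':' inside the path
// stays intact in the third field, as in findDigestInternal below.
func decodeLocation(location string) (path string, offset, length uint64, err error) {
	parts := strings.SplitN(location, ":", 3)
	if len(parts) != 3 {
		return "", 0, 0, fmt.Errorf("malformed location %q", location)
	}
	if offset, err = strconv.ParseUint(parts[0], 10, 64); err != nil {
		return "", 0, 0, err
	}
	if length, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
		return "", 0, 0, err
	}
	return parts[2], offset, length, nil
}

func main() {
	loc := encodeLocation("usr/lib/libc.so.6", 2048, 512)
	p, off, l, _ := decodeLocation(loc)
	fmt.Println(p, off, l) // usr/lib/libc.so.6 2048 512
}
```

The new length field is recorded for future use; the lookup path in the following hunk currently ignores it (see the "currently unused" comment).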
@@ -245,7 +245,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
 	var tags []string
 	for _, k := range toc {
 		if k.Digest != "" {
-			location := generateFileLocation(k.Name, 0)
+			location := generateFileLocation(k.Name, 0, uint64(k.Size))
 
 			off := uint64(vdata.Len())
 			l := uint64(len(location))
@@ -276,7 +276,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
 			digestLen = len(k.Digest)
 		}
 		if k.ChunkDigest != "" {
-			location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
+			location := generateFileLocation(k.Name, uint64(k.ChunkOffset), uint64(k.ChunkSize))
 			off := uint64(vdata.Len())
 			l := uint64(len(location))
 			d := generateTag(k.ChunkDigest, off, l)
@@ -490,7 +490,9 @@ func findTag(digest string, metadata *metadata) (string, uint64, uint64) {
 		if digest == d {
 			startOff := i*metadata.tagLen + metadata.digestLen
 			parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@")
+
 			off, _ := strconv.ParseInt(parts[0], 10, 64)
+
 			len, _ := strconv.ParseInt(parts[1], 10, 64)
 			return digest, uint64(off), uint64(len)
 		}
@@ -507,12 +509,16 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64,
 	defer c.mutex.RUnlock()
 
 	for _, layer := range c.layers {
-		digest, off, len := findTag(digest, layer.metadata)
+		digest, off, tagLen := findTag(digest, layer.metadata)
 		if digest != "" {
-			position := string(layer.metadata.vdata[off : off+len])
-			parts := strings.SplitN(position, "@", 2)
+			position := string(layer.metadata.vdata[off : off+tagLen])
+			parts := strings.SplitN(position, ":", 3)
+			if len(parts) != 3 {
+				continue
+			}
 			offFile, _ := strconv.ParseInt(parts[0], 10, 64)
-			return layer.target, parts[1], offFile, nil
+			// parts[1] is the chunk length, currently unused.
+			return layer.target, parts[2], offFile, nil
 		}
 	}
 
@@ -578,7 +584,10 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
 		return byteSliceAsString(buf.Bytes()[from:to])
 	}
 
-	iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+	pool := iter.Pool()
+	pool.ReturnIterator(iter)
+	iter = pool.BorrowIterator(manifest)
+
 	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
 		if strings.ToLower(field) == "version" {
 			toc.Version = iter.ReadInt()
@@ -657,8 +666,17 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
 		}
 		toc.Entries = append(toc.Entries, m)
 	}
-	break
 }
+
+	// validate there is no extra data in the provided input. This is a security measure to ensure
+	// that the digest we calculate for the TOC covers the entire document, with no unverified trailing bytes.
+ if iter.Error != nil && iter.Error != io.EOF { + return nil, iter.Error + } + if iter.WhatIsNext() != jsoniter.InvalidValue || !errors.Is(iter.Error, io.EOF) { + return nil, fmt.Errorf("unexpected data after manifest") + } + toc.StringsBuf = buf return &toc, nil } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 1d8141e38..112ca2c7c 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -257,8 +257,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann return decodedBlob, decodedTarSplit, int64(footerData.Offset), err } -func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) { - d, err := digest.Parse(expectedUncompressedChecksum) +func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) { + d, err := digest.Parse(expectedCompressedChecksum) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go index ca7ce30f7..a2fd904ca 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go @@ -420,6 +420,14 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r zstdWriter.Close() return err } + + // make sure the entire tarball is flushed to the output as it might contain + // some trailing zeros that affect the checksum. + if _, err := io.Copy(zstdWriter, its); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Flush(); err != nil { zstdWriter.Close() return err @@ -452,12 +460,12 @@ type zstdChunkedWriter struct { } func (w zstdChunkedWriter) Close() error { - err := <-w.tarSplitErr - if err != nil { - w.tarSplitOut.Close() + errClose := w.tarSplitOut.Close() + + if err := <-w.tarSplitErr; err != nil && err != io.EOF { return err } - return w.tarSplitOut.Close() + return errClose } func (w zstdChunkedWriter) Write(p []byte) (int, error) { diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go index a08928034..d3c105c4d 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -4,6 +4,7 @@ import ( "bufio" "fmt" "io" + "path/filepath" "strings" "time" "unicode" @@ -93,13 +94,18 @@ func getStMode(mode uint32, typ string) (uint32, error) { return mode, nil } -func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error { - path := entry.Name - if path == "" { +func sanitizeName(name string) string { + path := filepath.Clean(name) + if path == "." 
{
 		path = "/"
 	} else if path[0] != '/' {
 		path = "/" + path
 	}
+	return path
+}
+
+func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
+	path := sanitizeName(entry.Name)
 
 	if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil {
 		return err
@@ -133,9 +139,10 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri
 
 	var payload string
 	if entry.Linkname != "" {
-		payload = entry.Linkname
-		if entry.Type == internal.TypeLink && payload[0] != '/' {
-			payload = "/" + payload
+		if entry.Type == internal.TypeSymlink {
+			payload = entry.Linkname
+		} else {
+			payload = sanitizeName(entry.Linkname)
 		}
 	} else {
 		if len(entry.Digest) > 10 {
@@ -198,10 +205,13 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
 		if e.Linkname == "" {
 			continue
 		}
+		if e.Type == internal.TypeSymlink {
+			continue
+		}
 		links[e.Linkname] = links[e.Linkname] + 1
 	}
 
-	if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") {
+	if len(toc.Entries) == 0 || (sanitizeName(toc.Entries[0].Name) != "/") {
 		root := &internal.FileMetadata{
 			Name: "/",
 			Type: internal.TypeDir,
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 8493a2c19..f300df347 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -25,6 +25,7 @@ import (
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chunked/compressor"
 	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/fsverity"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/system"
 	"github.com/containers/storage/types"
@@ -40,12 +41,14 @@ import (
 const (
 	maxNumberMissingChunks = 1024
+	autoMergePartsThreshold = 128 // if the gap between two ranges is below this threshold, automatically merge them.
 	newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
 	containersOverrideXattr = "user.containers.override_stat"
 	bigDataKey = "zstd-chunked-manifest"
 	chunkedData = "zstd-chunked-data"
 	chunkedLayerDataKey = "zstd-chunked-layer-data"
 	tocKey = "toc"
+	fsVerityDigestsKey = "fs-verity-digests"
 
 	fileTypeZstdChunked = iota
 	fileTypeEstargz
@@ -71,11 +74,9 @@ type chunkedDiffer struct {
 	zstdReader *zstd.Decoder
 	rawReader io.Reader
 
-	// contentDigest is the digest of the uncompressed content
-	// (diffID) when the layer is fully retrieved. If the layer
-	// is not fully retrieved, instead of using the digest of the
-	// uncompressed content, it refers to the digest of the TOC.
-	contentDigest digest.Digest
+	// tocDigest is the digest of the TOC document when the layer
+	// is partially pulled.
+	tocDigest digest.Digest
 
 	// convertedToZstdChunked is set to true if the layer needs to
 	// be converted to the zstd:chunked format before it can be
@@ -86,9 +87,18 @@ type chunkedDiffer struct {
 	// the layer are trusted and should not be validated.
 	skipValidation bool
 
+	// blobDigest is the digest of the whole compressed layer. When
+	// convertToZstdChunked is set, it is used to validate the layer during
+	// conversion, since there is no TOC referenced by the manifest.
+	blobDigest digest.Digest
+
 	blobSize int64
 
 	storeOpts *types.StoreOptions
+
+	useFsVerity graphdriver.DifferFsVerity
+	fsVerityDigests map[string]string
+	fsVerityMutex sync.Mutex
 }
 
 var xattrsToIgnore = map[string]interface{}{
@@ -188,33 +198,7 @@ func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser,
 	return streams, errs, nil
 }
 
-func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSourceSeekable) (*seekableFile, digest.Digest, map[string]string, error) {
-	var payload io.ReadCloser
-	var streams chan io.ReadCloser
-	var errs chan error
-	var err error
-
-	chunksToRequest := []ImageSourceChunk{
-		{
-			Offset: 0,
-			Length: uint64(blobSize),
-		},
-	}
-
-	streams, errs, err = iss.GetBlobAt(chunksToRequest)
-	if err != nil {
-		return nil, "", nil, err
-	}
-	select {
-	case p := <-streams:
-		payload = p
-	case err := <-errs:
-		return nil, "", nil, err
-	}
-	if payload == nil {
-		return nil, "", nil, errors.New("invalid stream returned")
-	}
-
+func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableFile, digest.Digest, map[string]string, error) {
 	diff, err := archive.DecompressStream(payload)
 	if err != nil {
 		return nil, "", nil, err
@@ -235,10 +219,8 @@ func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSour
 		return nil, "", nil, err
 	}
 
-	digester := digest.Canonical.Digester()
-	hash := digester.Hash()
-
-	if _, err := io.Copy(io.MultiWriter(chunked, hash), diff); err != nil {
+	convertedOutputDigester := digest.Canonical.Digester()
+	if _, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff); err != nil {
 		f.Close()
 		return nil, "", nil, err
 	}
@@ -249,27 +231,39 @@ func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSour
 	is := seekableFile{
 		file: f,
 	}
-	return &is, digester.Digest(), newAnnotations, nil
+
+	return &is, convertedOutputDigester.Digest(), newAnnotations, nil
 }
 
 // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
-func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { - storeOpts, err := types.DefaultStoreOptionsAutoDetectUID() +func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { + storeOpts, err := types.DefaultStoreOptions() if err != nil { return nil, err } - if _, ok := annotations[internal.ManifestChecksumKey]; ok { + if !parseBooleanPullOption(&storeOpts, "enable_partial_images", true) { + return nil, errors.New("enable_partial_images not configured") + } + + _, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey] + _, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation] + + if hasZstdChunkedTOC && hasEstargzTOC { + return nil, errors.New("both zstd:chunked and eStargz TOC found") + } + + if hasZstdChunkedTOC { return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) } - if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok { + if hasEstargzTOC { return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) } - return makeConvertFromRawDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) + return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts) } -func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { +func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { if !parseBooleanPullOption(storeOpts, "convert_images", false) { return nil, errors.New("convert_images not configured") } @@ -280,6 +274,8 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize } return &chunkedDiffer{ + fsVerityDigests: make(map[string]string), + blobDigest: blobDigest, blobSize: blobSize, convertToZstdChunked: true, copyBuffer: makeCopyBuffer(), @@ -299,22 +295,23 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in return nil, err } - contentDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) + tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) if err != nil { return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err) } return &chunkedDiffer{ - blobSize: blobSize, - contentDigest: contentDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeZstdChunked, - layersCache: layersCache, - manifest: manifest, - storeOpts: storeOpts, - stream: iss, - tarSplit: tarSplit, - tocOffset: tocOffset, + fsVerityDigests: make(map[string]string), + blobSize: blobSize, + tocDigest: tocDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeZstdChunked, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tarSplit: tarSplit, + tocOffset: tocOffset, }, nil } @@ -328,21 +325,22 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize return nil, err } - contentDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) if err != nil { return nil, fmt.Errorf("parse TOC digest %q: 
%w", annotations[estargz.TOCJSONDigestAnnotation], err) } return &chunkedDiffer{ - blobSize: blobSize, - contentDigest: contentDigest, - copyBuffer: makeCopyBuffer(), - fileType: fileTypeEstargz, - layersCache: layersCache, - manifest: manifest, - storeOpts: storeOpts, - stream: iss, - tocOffset: tocOffset, + fsVerityDigests: make(map[string]string), + blobSize: blobSize, + tocDigest: tocDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeEstargz, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tocOffset: tocOffset, }, nil } @@ -939,6 +937,8 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT return nil } +type recordFsVerityFunc func(string, *os.File) error + type destinationFile struct { digester digest.Digester dirfd int @@ -948,9 +948,10 @@ type destinationFile struct { options *archive.TarOptions skipValidation bool to io.Writer + recordFsVerity recordFsVerityFunc } -func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool) (*destinationFile, error) { +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) { file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) if err != nil { return nil, err @@ -977,15 +978,32 @@ func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *ar options: options, dirfd: dirfd, skipValidation: skipValidation, + recordFsVerity: recordFsVerity, }, nil } func (d *destinationFile) Close() (Err error) { defer func() { - err := d.file.Close() + var roFile *os.File + var err error + + if d.recordFsVerity != nil { + roFile, err = reopenFileReadOnly(d.file) + if err == nil { + defer roFile.Close() + } else if Err == nil { + Err = err + } + } + + err = d.file.Close() if Err == nil { Err = err } + + if Err == nil && roFile != nil { + Err = d.recordFsVerity(d.metadata.Name, roFile) + } }() if !d.skipValidation { @@ -1008,6 +1026,35 @@ func closeDestinationFiles(files chan *destinationFile, errors chan error) { close(errors) } +func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error { + if c.useFsVerity == graphdriver.DifferFsVerityDisabled { + return nil + } + // fsverity.EnableVerity doesn't return an error if fs-verity was already + // enabled on the file. + err := fsverity.EnableVerity(path, int(roFile.Fd())) + if err != nil { + if c.useFsVerity == graphdriver.DifferFsVerityRequired { + return err + } + + // If it is not required, ignore the error if the filesystem does not support it. 
+ if errors.Is(err, unix.ENOTSUP) || errors.Is(err, unix.ENOTTY) { + return nil + } + } + verity, err := fsverity.MeasureVerity(path, int(roFile.Fd())) + if err != nil { + return err + } + + c.fsVerityMutex.Lock() + c.fsVerityDigests[path] = verity + c.fsVerityMutex.Unlock() + + return nil +} + func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) { var destFile *destinationFile @@ -1095,7 +1142,11 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan } filesToClose <- destFile } - destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation) + recordFsVerity := c.recordFsVerity + if c.useFsVerity == graphdriver.DifferFsVerityDisabled { + recordFsVerity = nil + } + destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation, recordFsVerity) if err != nil { Err = err goto exit @@ -1130,22 +1181,12 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan } func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { - getGap := func(missingParts []missingPart, i int) int { + getGap := func(missingParts []missingPart, i int) uint64 { prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length - return int(missingParts[i].SourceChunk.Offset - prev) - } - getCost := func(missingParts []missingPart, i int) int { - cost := getGap(missingParts, i) - if missingParts[i-1].OriginFile != nil { - cost += int(missingParts[i-1].SourceChunk.Length) - } - if missingParts[i].OriginFile != nil { - cost += int(missingParts[i].SourceChunk.Length) - } - return cost + return missingParts[i].SourceChunk.Offset - prev } - // simple case: merge chunks from the same file. + // simple case: merge chunks from the same file. Useful to reduce the number of parts to work with later. newMissingParts := missingParts[0:1] prevIndex := 0 for i := 1; i < len(missingParts); i++ { @@ -1165,28 +1206,50 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { } missingParts = newMissingParts - if len(missingParts) <= target { - return missingParts - } - - // this implementation doesn't account for duplicates, so it could merge - // more than necessary to reach the specified target. Since target itself - // is a heuristic value, it doesn't matter. 
- costs := make([]int, len(missingParts)-1) - for i := 1; i < len(missingParts); i++ { - costs[i-1] = getCost(missingParts, i) + type gap struct { + from int + to int + cost uint64 + } + var requestGaps []gap + lastOffset := int(-1) + numberSourceChunks := 0 + for i, c := range missingParts { + if c.OriginFile != nil || c.Hole { + // it does not require a network request + continue + } + numberSourceChunks++ + if lastOffset >= 0 { + prevEnd := missingParts[lastOffset].SourceChunk.Offset + missingParts[lastOffset].SourceChunk.Length + cost := c.SourceChunk.Offset - prevEnd + g := gap{ + from: lastOffset, + to: i, + cost: cost, + } + requestGaps = append(requestGaps, g) + } + lastOffset = i } - sort.Ints(costs) - - toShrink := len(missingParts) - target - if toShrink >= len(costs) { - toShrink = len(costs) - 1 + sort.Slice(requestGaps, func(i, j int) bool { + return requestGaps[i].cost < requestGaps[j].cost + }) + toMergeMap := make([]bool, len(missingParts)) + remainingToMerge := numberSourceChunks - target + for _, g := range requestGaps { + if remainingToMerge < 0 && g.cost > autoMergePartsThreshold { + continue + } + for i := g.from + 1; i <= g.to; i++ { + toMergeMap[i] = true + } + remainingToMerge-- } - targetValue := costs[toShrink] newMissingParts = missingParts[0:1] for i := 1; i < len(missingParts); i++ { - if getCost(missingParts, i) > targetValue { + if !toMergeMap[i] { newMissingParts = append(newMissingParts, missingParts[i]) } else { gap := getGap(missingParts, i) @@ -1218,6 +1281,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st } } + missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) calculateChunksToRequest() // There are some missing files. Prepare a multirange request for the missing chunks. @@ -1231,14 +1295,13 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st } if _, ok := err.(ErrBadRequest); ok { - requested := len(missingParts) // If the server cannot handle at least 64 chunks in a single request, just give up. 
- if requested < 64 { + if len(chunksToRequest) < 64 { return err } // Merge more chunks to request - missingParts = mergeMissingChunks(missingParts, requested/2) + missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2) calculateChunksToRequest() continue } @@ -1426,15 +1489,39 @@ type findAndCopyFileOptions struct { options *archive.TarOptions } +func reopenFileReadOnly(f *os.File) (*os.File, error) { + path := fmt.Sprintf("/proc/self/fd/%d", f.Fd()) + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), f.Name()), nil +} + func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { finalizeFile := func(dstFile *os.File) error { - if dstFile != nil { - defer dstFile.Close() - if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil { - return err - } + if dstFile == nil { + return nil } - return nil + err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false) + if err != nil { + dstFile.Close() + return err + } + var roFile *os.File + if c.useFsVerity != graphdriver.DifferFsVerityDisabled { + roFile, err = reopenFileReadOnly(dstFile) + } + dstFile.Close() + if err != nil { + return err + } + if roFile == nil { + return nil + } + + defer roFile.Close() + return c.recordFsVerity(r.Name, roFile) } found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks) @@ -1491,6 +1578,43 @@ func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMeta return new, nil } +func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) { + var payload io.ReadCloser + var streams chan io.ReadCloser + var errs chan error + var err error + + chunksToRequest := []ImageSourceChunk{ + { + Offset: 0, + Length: uint64(c.blobSize), + }, + } + + streams, errs, err = c.stream.GetBlobAt(chunksToRequest) + if err != nil { + return "", err + } + select { + case p := <-streams: + payload = p + case err := <-errs: + return "", err + } + if payload == nil { + return "", errors.New("invalid stream returned") + } + + originalRawDigester := digest.Canonical.Digester() + + r := io.TeeReader(payload, originalRawDigester.Hash()) + + // copy the entire tarball and compute its digest + _, err = io.Copy(destination, r) + + return originalRawDigester.Digest(), err +} + func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) { defer c.layersCache.release() defer func() { @@ -1499,11 +1623,40 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } }() + c.useFsVerity = differOpts.UseFsVerity + // stream to use for reading the zstd:chunked or Estargz file. stream := c.stream + var uncompressedDigest digest.Digest + if c.convertToZstdChunked { - fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, c.blobSize, c.stream) + fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + blobFile := os.NewFile(uintptr(fd), "blob-file") + defer func() { + if blobFile != nil { + blobFile.Close() + } + }() + + // calculate the checksum before accessing the file. 
+ compressedDigest, err := c.copyAllBlobToFile(blobFile) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + + if compressedDigest != c.blobDigest { + return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("invalid digest to convert: expected %q, got %q", c.blobDigest, compressedDigest) + } + + if _, err := blobFile.Seek(0, io.SeekStart); err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + + fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile) if err != nil { return graphdriver.DriverWithDifferOutput{}, err } @@ -1511,6 +1664,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff // need to keep it open until the entire file is processed. defer fileSource.Close() + // Close the file so that the file descriptor is released and the file is deleted. + blobFile.Close() + blobFile = nil + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations) if err != nil { return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err) @@ -1523,12 +1680,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff c.fileType = fileTypeZstdChunked c.manifest = manifest c.tarSplit = tarSplit - // since we retrieved the whole file and it was validated, use the diffID instead of the TOC digest. - c.contentDigest = diffID c.tocOffset = tocOffset // the file was generated by us and the digest for each file was already computed, no need to validate it again. c.skipValidation = true + // since we retrieved the whole file and it was validated, set the uncompressed digest. + uncompressedDigest = diffID } lcd := chunkedLayerData{ @@ -1557,11 +1714,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff Artifacts: map[string]interface{}{ tocKey: toc, }, - TOCDigest: c.contentDigest, - } - - if !parseBooleanPullOption(c.storeOpts, "enable_partial_images", false) { - return output, errors.New("enable_partial_images not configured") + TOCDigest: c.tocDigest, + UncompressedDigest: uncompressedDigest, } // When the hard links deduplication is used, file attributes are ignored because setting them @@ -1678,13 +1832,17 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff mode := os.FileMode(r.Mode) - r.Name = filepath.Clean(r.Name) - r.Linkname = filepath.Clean(r.Linkname) - t, err := typeToTarType(r.Type) if err != nil { return output, err } + + r.Name = filepath.Clean(r.Name) + // do not modify the value of symlinks + if r.Linkname != "" && t != tar.TypeSymlink { + r.Linkname = filepath.Clean(r.Linkname) + } + if whiteoutConverter != nil { hdr := archivetar.Header{ Typeflag: t, @@ -1730,6 +1888,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } case tar.TypeDir: + if r.Name == "" || r.Name == "." { + output.RootDirMode = &mode + } if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil { return output, err } @@ -1851,7 +2012,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } // There are some missing files. Prepare a multirange request for the missing chunks. 
if len(missingParts) > 0 {
-		missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
 		if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil {
 			return output, err
 		}
@@ -1867,6 +2027,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
 	}
 
+	output.Artifacts[fsVerityDigestsKey] = c.fsVerityDigests
+
 	return output, nil
 }
 
@@ -1926,7 +2088,10 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
 			e.Chunks = make([]*internal.FileMetadata, nChunks+1)
 			for j := 0; j <= nChunks; j++ {
-				e.Chunks[j] = &entries[i+j]
+				// we need a copy here, otherwise we override the
+				// .Size later
+				copy := entries[i+j]
+				e.Chunks[j] = &copy
 				e.EndOffset = entries[i+j].EndOffset
 			}
 			i += nChunks
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
index 8d3fcf2ba..ac6bdfec7 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go
@@ -9,9 +9,10 @@ import (
 
 	storage "github.com/containers/storage"
 	graphdriver "github.com/containers/storage/drivers"
+	digest "github.com/opencontainers/go-digest"
 )
 
 // GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
-func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
+func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
 	return nil, errors.New("format not supported on this system")
 }
diff --git a/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
new file mode 100644
index 000000000..6fbaa41b5
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go
@@ -0,0 +1,41 @@
+package toc
+
+import (
+	"errors"
+
+	"github.com/containers/storage/pkg/chunked/internal"
+	digest "github.com/opencontainers/go-digest"
+)
+
+// tocJSONDigestAnnotation is the annotation key for the digest of the estargz
+// TOC JSON.
+// It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation.
+// Duplicate it here to avoid a dependency on the package.
+const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
+
+// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
+// This function retrieves a digest that represents the content of a
+// table of contents (TOC) from the image's annotations.
+// This is an experimental feature and may be changed/removed in the future.
+func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { + d1, ok1 := annotations[tocJSONDigestAnnotation] + d2, ok2 := annotations[internal.ManifestChecksumKey] + switch { + case ok1 && ok2: + return nil, errors.New("both zstd:chunked and eStargz TOC found") + case ok1: + d, err := digest.Parse(d1) + if err != nil { + return nil, err + } + return &d, nil + case ok2: + d, err := digest.Parse(d2) + if err != nil { + return nil, err + } + return &d, nil + default: + return nil, nil + } +} diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index 20d72ca89..febe8a0c5 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -97,6 +97,8 @@ type OverlayOptionsConfig struct { Inodes string `toml:"inodes,omitempty"` // Do not create a bind mount on the storage home SkipMountHome string `toml:"skip_mount_home,omitempty"` + // Specify whether composefs must be used to mount the data layers + UseComposefs string `toml:"use_composefs,omitempty"` // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories ForceMask string `toml:"force_mask,omitempty"` @@ -147,6 +149,9 @@ type OptionsConfig struct { // ignored when building an image. IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"` + // Specify whether composefs must be used to mount the data layers + UseComposefs string `toml:"use_composefs,omitempty"` + // ForceMask indicates the permissions mask (e.g. "0755") to use for new // files and directories. ForceMask os.FileMode `toml:"force_mask,omitempty"` @@ -283,6 +288,7 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { } case "overlay", "overlay2": + // Specify whether composefs must be used to mount the data layers if options.Overlay.IgnoreChownErrors != "" { doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors)) } else if options.IgnoreChownErrors != "" { @@ -316,6 +322,9 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { } else if options.ForceMask != 0 { doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask)) } + if options.Overlay.UseComposefs != "" { + doptions = append(doptions, fmt.Sprintf("%s.use_composefs=%s", driverName, options.Overlay.UseComposefs)) + } case "vfs": if options.Vfs.IgnoreChownErrors != "" { doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors)) diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go new file mode 100644 index 000000000..5b21c4b76 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go @@ -0,0 +1,45 @@ +package fsverity + +import ( + "errors" + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// verityDigest struct represents the digest used for verifying the integrity of a file. +type verityDigest struct { + Fsv unix.FsverityDigest + Buf [64]byte +} + +// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened +// in read-only mode. +// The 'description' parameter is a human-readable description of the file. 
+func EnableVerity(description string, fd int) error { + enableArg := unix.FsverityEnableArg{ + Version: 1, + Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256, + Block_size: 4096, + } + + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg))) + if e1 != 0 && !errors.Is(e1, unix.EEXIST) { + return fmt.Errorf("failed to enable verity for %q: %w", description, e1) + } + return nil +} + +// MeasureVerity measures and returns the verity digest for the file represented by 'fd'. +// The 'description' parameter is a human-readable description of the file. +func MeasureVerity(description string, fd int) (string, error) { + var digest verityDigest + digest.Fsv.Size = 64 + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))) + if e1 != 0 { + return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1) + } + return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil +} diff --git a/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go new file mode 100644 index 000000000..46e68c578 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go @@ -0,0 +1,21 @@ +//go:build !linux +// +build !linux + +package fsverity + +import ( + "fmt" +) + +// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened +// in read-only mode. +// The 'description' parameter is a human-readable description of the file. +func EnableVerity(description string, fd int) error { + return fmt.Errorf("fs-verity is not supported on this platform") +} + +// MeasureVerity measures and returns the verity digest for the file represented by 'fd'. +// The 'description' parameter is a human-readable description of the file. +func MeasureVerity(description string, fd int) (string, error) { + return "", fmt.Errorf("fs-verity is not supported on this platform") +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go index 85c5e76c8..7eb63b67a 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir.go @@ -6,21 +6,6 @@ import ( "path/filepath" ) -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} - // GetDataHome returns XDG_DATA_HOME. // GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. 
// diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go index 0883ee023..9057fe1b2 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -1,5 +1,5 @@ -//go:build !linux && !darwin && !freebsd -// +build !linux,!darwin,!freebsd +//go:build !linux && !darwin && !freebsd && !windows +// +build !linux,!darwin,!freebsd,!windows package homedir @@ -8,6 +8,8 @@ package homedir import ( "errors" + "os" + "path/filepath" ) // GetRuntimeDir is unsupported on non-linux system. @@ -19,3 +21,18 @@ func GetRuntimeDir() (string, error) { func StickRuntimeDirContents(files []string) ([]string, error) { return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") } + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go index 9976f19af..45be87659 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -7,12 +7,16 @@ package homedir // NOTE: this package has originally been copied from github.com/docker/docker. import ( - "errors" + "fmt" "os" "path/filepath" + "strconv" "strings" + "sync" + "syscall" "github.com/containers/storage/pkg/unshare" + "github.com/sirupsen/logrus" ) // Key returns the env var name for the user's home dir based on @@ -40,18 +44,6 @@ func GetShortcutString() string { return "~" } -// GetRuntimeDir returns XDG_RUNTIME_DIR. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { - return filepath.EvalSymlinks(xdgRuntimeDir) - } - return "", errors.New("could not get XDG_RUNTIME_DIR") -} - // StickRuntimeDirContents sets the sticky bit on files that are under // XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. // @@ -94,3 +86,98 @@ func stick(f string) error { m |= os.ModeSticky return os.Chmod(f, m) } + +var ( + rootlessConfigHomeDirError error + rootlessConfigHomeDirOnce sync.Once + rootlessConfigHomeDir string + rootlessRuntimeDirOnce sync.Once + rootlessRuntimeDir string +) + +// isWriteableOnlyByOwner checks that the specified permission mask allows write +// access only to the owner. +func isWriteableOnlyByOwner(perm os.FileMode) bool { + return (perm & 0o722) == 0o700 +} + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. 
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + rootlessConfigHomeDirOnce.Do(func() { + cfgHomeDir := os.Getenv("XDG_CONFIG_HOME") + if cfgHomeDir == "" { + home := Get() + resolvedHome, err := filepath.EvalSymlinks(home) + if err != nil { + rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err) + return + } + tmpDir := filepath.Join(resolvedHome, ".config") + _ = os.MkdirAll(tmpDir, 0o700) + st, err := os.Stat(tmpDir) + if err != nil { + rootlessConfigHomeDirError = err + return + } else if int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() { + cfgHomeDir = tmpDir + } else { + rootlessConfigHomeDirError = fmt.Errorf("path %q exists and it is not owned by the current user", tmpDir) + return + } + } + rootlessConfigHomeDir = cfgHomeDir + }) + + return rootlessConfigHomeDir, rootlessConfigHomeDirError +} + +// GetRuntimeDir returns a directory suitable to store runtime files. +// The function will try to use the XDG_RUNTIME_DIR env variable if it is set. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable +// directory for the current user. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + var rootlessRuntimeDirError error + + rootlessRuntimeDirOnce.Do(func() { + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + + if runtimeDir != "" { + rootlessRuntimeDir, rootlessRuntimeDirError = filepath.EvalSymlinks(runtimeDir) + return + } + + uid := strconv.Itoa(unshare.GetRootlessUID()) + if runtimeDir == "" { + tmpDir := filepath.Join("/run", "user", uid) + if err := os.MkdirAll(tmpDir, 0o700); err != nil { + logrus.Debug(err) + } + st, err := os.Lstat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { + runtimeDir = tmpDir + } + } + if runtimeDir == "" { + tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("storage-run-%s", uid)) + if err := os.MkdirAll(tmpDir, 0o700); err != nil { + logrus.Debug(err) + } + st, err := os.Lstat(tmpDir) + if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { + runtimeDir = tmpDir + } else { + rootlessRuntimeDirError = fmt.Errorf("path %q exists and it is not writeable only by the current user", tmpDir) + return + } + } + rootlessRuntimeDir = runtimeDir + }) + + return rootlessRuntimeDir, rootlessRuntimeDirError +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go index af65f2c03..a76610f90 100644 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -5,6 +5,7 @@ package homedir import ( "os" + "path/filepath" ) // Key returns the env var name for the user's home dir based on @@ -25,8 +26,36 @@ func Get() string { return home } +// GetConfigHome returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func GetConfigHome() (string, error) { + return filepath.Join(Get(), ".config"), nil +} + // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. 
func GetShortcutString() string {
 	return "%USERPROFILE%" // be careful while using in format functions
 }
+
+// StickRuntimeDirContents is a no-op on Windows
+func StickRuntimeDirContents(files []string) ([]string, error) {
+	return nil, nil
+}
+
+// GetRuntimeDir returns a directory suitable to store runtime files.
+// The function will try to use the XDG_RUNTIME_DIR env variable if it is set.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable
+// directory for the current user.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+	data, err := GetDataHome()
+	if err != nil {
+		return "", err
+	}
+	runtimeDir := filepath.Join(data, "containers", "storage")
+	return runtimeDir, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
index 4701dc5ac..d7cb4ac2f 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -14,7 +14,7 @@ import (
 	"syscall"
 
 	"github.com/containers/storage/pkg/system"
-	"github.com/opencontainers/runc/libcontainer/user"
+	"github.com/moby/sys/user"
 )
 
 var (
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
index 83de680c2..e3160d0da 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
@@ -17,7 +17,7 @@ const (
 
 // IsRootless tells us if we are running in rootless mode
 func IsRootless() bool {
-	return false
+	return os.Getuid() != 0
 }
 
 // GetRootlessUID returns the UID of the user in the parent userNS
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index cb4525f27..924e8f13a 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -59,7 +59,7 @@ additionalimagestores = [
 # can deduplicate pulling of content, disk storage of content and can allow the
 # kernel to use less memory when running containers.
 
-# containers/storage supports three keys
+# containers/storage supports four keys
 #   * enable_partial_images="true" | "false"
 #     Tells containers/storage to look for files previously pulled in storage
 #     rather than always pulling them from the container registry.
@@ -70,7 +70,12 @@ additionalimagestores = [
 #     Tells containers/storage where an ostree repository exists that might have
 #     previously pulled content which can be used when attempting to avoid
 #     pulling content from the container registry
-pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
+#   * convert_images = "false" | "true"
+#     If set to true, containers/storage will convert images to a
+#     format compatible with partial pulls in order to take advantage
+#     of local deduplication and hard linking. It is an expensive
+#     operation so it is not enabled by default.
+pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""}
 
 # Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
 # a container, to the UIDs/GIDs as they should appear outside of the container,
@@ -130,6 +135,9 @@ mountopt = "nodev"
 # Set to skip a PRIVATE bind mount on the storage home directory.
 # skip_mount_home = "false"
 
+# Set to use composefs to mount data layers with overlay.
+# use_composefs = "false"
+
 # Size is used to set a maximum size of the container image.
 # size = ""
 
diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd
index c8abee64f..03bbe2461 100644
--- a/vendor/github.com/containers/storage/storage.conf-freebsd
+++ b/vendor/github.com/containers/storage/storage.conf-freebsd
@@ -96,6 +96,9 @@ mountopt = "nodev"
 # Set to skip a PRIVATE bind mount on the storage home directory.
 # skip_mount_home = "false"
 
+# Set to use composefs to mount data layers with overlay.
+# use_composefs = "false"
+
 # Size is used to set a maximum size of the container image.
 # size = ""
 
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index 6753b296f..c6f125189 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -1,6 +1,7 @@
 package storage
 
 import (
+	_ "embed"
 	"encoding/base64"
 	"errors"
 	"fmt"
@@ -10,6 +11,7 @@ import (
 	"reflect"
 	"strings"
 	"sync"
+	"syscall"
 	"time"
 
 	// register all of the built-in drivers
@@ -69,6 +71,19 @@ type metadataStore interface {
 	rwMetadataStore
 }
 
+// ApplyStagedLayerOptions contains options to pass to ApplyStagedLayer
+type ApplyStagedLayerOptions struct {
+	ID           string        // Mandatory
+	ParentLayer  string        // Optional
+	Names        []string      // Optional
+	MountLabel   string        // Optional
+	Writeable    bool          // Optional
+	LayerOptions *LayerOptions // Optional
+
+	DiffOutput  *drivers.DriverWithDifferOutput  // Mandatory
+	DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory
+}
+
 // An roBigDataStore wraps up the read-only big-data related methods of the
 // various types of file-based lookaside stores that we implement.
 type roBigDataStore interface {
@@ -313,14 +328,24 @@ type Store interface {
 	// ApplyDiffWithDiffer applies a diff to a layer.
 	// It is the caller's responsibility to clean up the staging directory if it is not
 	// successfully applied with ApplyDiffFromStagingDirectory.
-	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
 
 	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
-	ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
+	// Deprecated: it will be removed soon. Use ApplyStagedLayer instead.
+	ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
 
 	// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
+	// Deprecated: it will be removed soon. Use CleanupStagedLayer instead.
 	CleanupStagingDirectory(stagingDirectory string) error
 
+	// ApplyStagedLayer combines the functions of CreateLayer and ApplyDiffFromStagingDirectory,
+	// marking the layer for automatic removal if applying the diff fails
+	// for any reason.
+	ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error)
+
+	// CleanupStagedLayer cleans up the staging directory. It can be used to clean up the staging directory on errors
+	CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error
+
 	// DifferTarget gets the path to the differ target.
 	DifferTarget(id string) (string, error)
 
@@ -332,6 +357,10 @@ type Store interface {
 	// specified uncompressed digest value recorded for them.
 	LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
 
+	// LayersByTOCDigest returns a slice of the layers with the
+	// specified TOC digest value recorded for them.
+	LayersByTOCDigest(d digest.Digest) ([]Layer, error)
+
 	// LayerSize returns a cached approximation of the layer's size, or -1
 	// if we don't have a value on hand.
 	LayerSize(id string) (int64, error)
@@ -391,6 +420,18 @@ type Store interface {
 	// allow ImagesByDigest to find images by their correct digests.
 	SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
 
+	// ImageDirectory returns a path of a directory which the caller can
+	// use to store data, specific to the image, which the library does not
+	// directly manage. The directory will be deleted when the image is
+	// deleted.
+	ImageDirectory(id string) (string, error)
+
+	// ImageRunDirectory returns a path of a directory which the caller can
+	// use to store data, specific to the image, which the library does not
+	// directly manage. The directory will be deleted when the host system
+	// is restarted.
+	ImageRunDirectory(id string) (string, error)
+
 	// ListLayerBigData retrieves a list of the (possibly large) chunks of
 	// named data associated with a layer.
 	ListLayerBigData(id string) ([]string, error)
@@ -562,10 +603,19 @@ type LayerOptions struct {
 	// initialize this layer. If set, it should be a child of the layer
 	// which we want to use as the parent of the new layer.
 	TemplateLayer string
-	// OriginalDigest specifies a digest of the tarstream (diff), if one is
+	// OriginalDigest specifies a digest of the (possibly-compressed) tarstream (diff), if one is
 	// provided along with these LayerOptions, and reliably known by the caller.
+	// The digest might not be exactly the digest of the provided tarstream
+	// (e.g. the digest might be of a compressed representation, while providing
+	// an uncompressed one); in that case the caller is responsible for ensuring the two match.
 	// Use the default "" if this field is not applicable or the value is not known.
 	OriginalDigest digest.Digest
+	// OriginalSize specifies the size of the (possibly-compressed) tarstream corresponding
+	// to OriginalDigest.
+	// If the digest does not match the provided tarstream, OriginalSize must match OriginalDigest,
+	// not the tarstream.
+	// Use nil if not applicable or not known.
+	OriginalSize *int64
 	// UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
 	// of the tarstream (diff), if one is provided along with these LayerOptions,
 	// and reliably known by the caller.
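The staged-layer API above replaces the deprecated two-step flow (create the layer, then call ApplyDiffFromStagingDirectory on it). A hedged migration sketch using only the types declared in these hunks; the helper name and the error handling are illustrative, not part of the patch:

```go
package stagedexample

import (
	storage "github.com/containers/storage"
	drivers "github.com/containers/storage/drivers"
)

// applyStaged creates a layer and applies a staged diff in one call.
func applyStaged(store storage.Store, id, parent string,
	out *drivers.DriverWithDifferOutput,
	opts *drivers.ApplyDiffWithDifferOpts) (*storage.Layer, error) {
	layer, err := store.ApplyStagedLayer(storage.ApplyStagedLayerOptions{
		ID:          id,     // mandatory
		ParentLayer: parent, // optional
		DiffOutput:  out,    // mandatory
		DiffOptions: opts,   // mandatory
	})
	if err != nil {
		// The layer itself is marked for automatic removal when applying
		// the diff fails; releasing the staged artifacts here is an
		// illustrative choice, not something the patch mandates.
		_ = store.CleanupStagedLayer(out)
		return nil, err
	}
	return layer, nil
}
```

Because a failed layer is removed automatically, callers no longer need the manual CleanupStagingDirectory bookkeeping that the deprecated methods required.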
@@ -922,11 +972,13 @@ func (s *store) load() error { if err := os.MkdirAll(gipath, 0o700); err != nil { return err } - ris, err := newImageStore(gipath) + imageStore, err := newImageStore(gipath) if err != nil { return err } - s.imageStore = ris + s.imageStore = imageStore + + s.rwImageStores = []rwImageStore{imageStore} gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") if err := os.MkdirAll(gcpath, 0o700); err != nil { @@ -944,13 +996,16 @@ func (s *store) load() error { s.containerStore = rcs - for _, store := range driver.AdditionalImageStores() { + additionalImageStores := s.graphDriver.AdditionalImageStores() + if s.imageStoreDir != "" { + additionalImageStores = append([]string{s.graphRoot}, additionalImageStores...) + } + + for _, store := range additionalImageStores { gipath := filepath.Join(store, driverPrefix+"images") var ris roImageStore - if s.imageStoreDir != "" && store == s.graphRoot { - // If --imagestore was set and current store - // is `graphRoot` then mount it as a `rw` additional - // store instead of `readonly` additional store. + // both the graphdriver and the imagestore must be used read-write. + if store == s.imageStoreDir || store == s.graphRoot { imageStore, err := newImageStore(gipath) if err != nil { return err @@ -960,6 +1015,10 @@ func (s *store) load() error { } else { ris, err = newROImageStore(gipath) if err != nil { + if errors.Is(err, syscall.EROFS) { + logrus.Debugf("Ignoring creation of lockfiles on read-only file systems %q, %v", gipath, err) + continue + } return err } } @@ -1031,15 +1090,9 @@ func (s *store) stopUsingGraphDriver() { // Almost all users should use startUsingGraphDriver instead. // The caller must hold s.graphLock. func (s *store) createGraphDriverLocked() (drivers.Driver, error) { - driverRoot := s.imageStoreDir - imageStoreBase := s.graphRoot - if driverRoot == "" { - driverRoot = s.graphRoot - imageStoreBase = "" - } config := drivers.Options{ - Root: driverRoot, - ImageStore: imageStoreBase, + Root: s.graphRoot, + ImageStore: s.imageStoreDir, RunRoot: s.runRoot, DriverPriority: s.graphDriverPriority, DriverOptions: s.graphOptions, @@ -1069,15 +1122,15 @@ func (s *store) getLayerStoreLocked() (rwLayerStore, error) { if err := os.MkdirAll(rlpath, 0o700); err != nil { return nil, err } - imgStoreRoot := s.imageStoreDir - if imgStoreRoot == "" { - imgStoreRoot = s.graphRoot - } - glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers") + glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") if err := os.MkdirAll(glpath, 0o700); err != nil { return nil, err } - rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore) + ilpath := "" + if s.imageStoreDir != "" { + ilpath = filepath.Join(s.imageStoreDir, driverPrefix+"layers") + } + rls, err := s.newLayerStore(rlpath, glpath, ilpath, s.graphDriver, s.transientStore) if err != nil { return nil, err } @@ -1108,8 +1161,10 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) { if err := os.MkdirAll(rlpath, 0o700); err != nil { return nil, err } + for _, store := range s.graphDriver.AdditionalImageStores() { glpath := filepath.Join(store, driverPrefix+"layers") + rls, err := newROLayerStore(rlpath, glpath, s.graphDriver) if err != nil { return nil, err @@ -1390,8 +1445,7 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { return true } -func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) { - var parentLayer 
*Layer +func (s *store) putLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) { rlstore, rlstores, err := s.bothLayerStoreKinds() if err != nil { return nil, -1, err @@ -1404,6 +1458,8 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w return nil, -1, err } defer s.containerStore.stopWriting() + + var parentLayer *Layer var options LayerOptions if lOptions != nil { options = *lOptions @@ -1463,6 +1519,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w } layerOptions := LayerOptions{ OriginalDigest: options.OriginalDigest, + OriginalSize: options.OriginalSize, UncompressedDigest: options.UncompressedDigest, Flags: options.Flags, } @@ -1476,7 +1533,11 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w GIDMap: copyIDMap(gidMap), } } - return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff) + return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo) +} + +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) { + return s.putLayer(id, parent, names, mountLabel, writeable, lOptions, diff, nil) } func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) { @@ -1686,7 +1747,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst } } layerOptions.TemplateLayer = layer.ID - mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil) + mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil) if err != nil { return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err) } @@ -1857,7 +1918,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat options.Flags[mountLabelFlag] = mountLabel } - clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil) + clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil, nil) if err != nil { return nil, err } @@ -2530,7 +2591,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if err := s.writeToAllStores(func(rlstore rwLayerStore) error { // Delete image from all available imagestores configured to be used. imageFound := false - for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) 
{ + for _, is := range s.rwImageStores { if is != s.imageStore { // This is an additional writeable image store // so we must perform lock @@ -2741,7 +2802,13 @@ func (s *store) Status() ([][2]string, error) { return rlstore.Status() } +//go:embed VERSION +var storageVersion string + func (s *store) Version() ([][2]string, error) { + if trimmedVersion := strings.TrimSpace(storageVersion); trimmedVersion != "" { + return [][2]string{{"Version", trimmedVersion}}, nil + } return [][2]string{}, nil } @@ -2915,16 +2982,29 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro return nil, ErrLayerUnknown } -func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error { +func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error { + if stagingDirectory != diffOutput.Target { + return fmt.Errorf("invalid value for staging directory, it must be the same as the differ target directory") + } _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { if !rlstore.Exists(to) { return struct{}{}, ErrLayerUnknown } - return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options) + return struct{}{}, rlstore.applyDiffFromStagingDirectory(to, diffOutput, options) }) return err } +func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) { + slo := stagedLayerOptions{ + DiffOutput: args.DiffOutput, + DiffOptions: args.DiffOptions, + } + + layer, _, err := s.putLayer(args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo) + return layer, err +} + func (s *store) CleanupStagingDirectory(stagingDirectory string) error { _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory) @@ -2932,7 +3012,14 @@ func (s *store) CleanupStagingDirectory(stagingDirectory string) error { return err } -func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { +func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error { + _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { + return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target) + }) + return err +} + +func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) { return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) { if to != "" && !rlstore.Exists(to) { return nil, ErrLayerUnknown @@ -2994,6 +3081,13 @@ func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) } +func (s *store) LayersByTOCDigest(d digest.Digest) ([]Layer, error) { + if err := d.Validate(); err != nil { + return nil, fmt.Errorf("looking for TOC matching digest %q: %w", d, err) + } + return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByTOCDigest(d) }, d) +} + func (s *store) LayerSize(id string) (int64, error) { if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, 
error) { if store.Exists(id) { @@ -3288,6 +3382,27 @@ func (s *store) ContainerByLayer(id string) (*Container, error) { return nil, ErrContainerUnknown } +func (s *store) ImageDirectory(id string) (string, error) { + foundImage := false + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { + if store.Exists(id) { + foundImage = true + } + middleDir := s.graphDriverName + "-images" + gipath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gipath, 0o700); err != nil { + return "", true, err + } + return gipath, true, nil + }); done { + return res, err + } + if foundImage { + return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist) + } + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + func (s *store) ContainerDirectory(id string) (string, error) { res, _, err := readContainerStore(s, func() (string, bool, error) { id, err := s.containerStore.Lookup(id) @@ -3305,6 +3420,28 @@ func (s *store) ContainerDirectory(id string) (string, error) { return res, err } +func (s *store) ImageRunDirectory(id string) (string, error) { + foundImage := false + if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) { + if store.Exists(id) { + foundImage = true + } + + middleDir := s.graphDriverName + "-images" + rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0o700); err != nil { + return "", true, err + } + return rcpath, true, nil + }); done { + return res, err + } + if foundImage { + return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist) + } + return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown) +} + func (s *store) ContainerRunDirectory(id string) (string, error) { res, _, err := readContainerStore(s, func() (string, bool, error) { id, err := s.containerStore.Lookup(id) @@ -3545,8 +3682,8 @@ func SetDefaultConfigFilePath(path string) { } // DefaultConfigFile returns the path to the storage config file used -func DefaultConfigFile(rootless bool) (string, error) { - return types.DefaultConfigFile(rootless) +func DefaultConfigFile() (string, error) { + return types.DefaultConfigFile() } // ReloadConfigurationFile parses the specified configuration file and overrides diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index 5ae667a49..ad0bfa43a 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -11,7 +11,9 @@ import ( "github.com/BurntSushi/toml" cfg "github.com/containers/storage/pkg/config" + "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" ) @@ -87,7 +89,7 @@ func loadDefaultStoreOptions() { _, err := os.Stat(defaultOverrideConfigFile) if err == nil { - // The DefaultConfigFile(rootless) function returns the path + // The DefaultConfigFile() function returns the path // of the used storage.conf file, by returning defaultConfigFile // If override exists containers/storage uses it by default. 
defaultConfigFile = defaultOverrideConfigFile @@ -109,21 +111,41 @@ func loadDefaultStoreOptions() { setDefaults() } -// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing. -// Everyone but the tests this is intended for should only call DefaultStoreOptions, never this function. -func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) { +// loadStoreOptions returns the default storage ops for containers +func loadStoreOptions() (StoreOptions, error) { + storageConf, err := DefaultConfigFile() + if err != nil { + return defaultStoreOptions, err + } + return loadStoreOptionsFromConfFile(storageConf) +} + +// usePerUserStorage returns whether the user private storage must be used. +// We cannot simply use the unshare.IsRootless() condition, because +// that checks only if the current process needs a user namespace to +// work and it would break cases where the process is already created +// in a user namespace (e.g. nested Podman/Buildah) and the desired +// behavior is to use system paths instead of user private paths. +func usePerUserStorage() bool { + return unshare.IsRootless() && unshare.GetRootlessUID() != 0 +} + +// loadStoreOptionsFromConfFile is an internal implementation detail of DefaultStoreOptions to allow testing. +// Everyone but the tests this is intended for should only call loadStoreOptions, never this function. +func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) { var ( defaultRootlessRunRoot string defaultRootlessGraphRoot string err error ) + defaultStoreOptionsOnce.Do(loadDefaultStoreOptions) if loadDefaultStoreOptionsErr != nil { return StoreOptions{}, loadDefaultStoreOptionsErr } storageOpts := defaultStoreOptions - if rootless && rootlessUID != 0 { - storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts) + if usePerUserStorage() { + storageOpts, err = getRootlessStorageOpts(storageOpts) if err != nil { return storageOpts, err } @@ -137,7 +159,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str defaultRootlessGraphRoot = storageOpts.GraphRoot storageOpts = StoreOptions{} reloadConfigurationFileIfNeeded(storageConf, &storageOpts) - if rootless && rootlessUID != 0 { + if usePerUserStorage() { // If the file did not specify a graphroot or runroot, // set sane defaults so we don't try and use root-owned // directories @@ -156,6 +178,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str if storageOpts.RunRoot == "" { return storageOpts, fmt.Errorf("runroot must be set") } + rootlessUID := unshare.GetRootlessUID() runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID) if err != nil { return storageOpts, err @@ -186,26 +209,17 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str return storageOpts, nil } -// loadStoreOptions returns the default storage ops for containers -func loadStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { - storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0) - if err != nil { - return defaultStoreOptions, err - } - return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf) -} - // UpdateOptions should be called iff container engine received a SIGHUP, // otherwise use DefaultStoreOptions -func UpdateStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { - storeOptions, storeError = loadStoreOptions(rootless, rootlessUID) +func UpdateStoreOptions() 
(StoreOptions, error) { + storeOptions, storeError = loadStoreOptions() return storeOptions, storeError } // DefaultStoreOptions returns the default storage ops for containers -func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) { +func DefaultStoreOptions() (StoreOptions, error) { once.Do(func() { - storeOptions, storeError = loadStoreOptions(rootless, rootlessUID) + storeOptions, storeError = loadStoreOptions() }) return storeOptions, storeError } @@ -270,14 +284,26 @@ func isRootlessDriver(driver string) bool { } // getRootlessStorageOpts returns the storage opts for containers running as non root -func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) { +func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) { var opts StoreOptions - dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID) + rootlessUID := unshare.GetRootlessUID() + + dataDir, err := homedir.GetDataHome() + if err != nil { + return opts, err + } + + rootlessRuntime, err := homedir.GetRuntimeDir() if err != nil { return opts, err } - opts.RunRoot = rootlessRuntime + + opts.RunRoot = filepath.Join(rootlessRuntime, "containers") + if err := os.MkdirAll(opts.RunRoot, 0o700); err != nil { + return opts, fmt.Errorf("unable to make rootless runtime: %w", err) + } + opts.PullOptions = systemOpts.PullOptions if systemOpts.RootlessStoragePath != "" { opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID) @@ -343,12 +369,6 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti return opts, nil } -// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers -func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) { - uid := getRootlessUID() - return DefaultStoreOptions(uid != 0, uid) -} - var prevReloadConfig = struct { storeOptions *StoreOptions mod time.Time @@ -518,8 +538,8 @@ func Options() (StoreOptions, error) { } // Save overwrites the tomlConfig in storage.conf with the given conf -func Save(conf TomlConfig, rootless bool) error { - configFile, err := DefaultConfigFile(rootless) +func Save(conf TomlConfig) error { + configFile, err := DefaultConfigFile() if err != nil { return err } @@ -537,10 +557,10 @@ func Save(conf TomlConfig, rootless bool) error { } // StorageConfig is used to retrieve the storage.conf toml in order to overwrite it -func StorageConfig(rootless bool) (*TomlConfig, error) { +func StorageConfig() (*TomlConfig, error) { config := new(TomlConfig) - configFile, err := DefaultConfigFile(rootless) + configFile, err := DefaultConfigFile() if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go index 73134f82d..5b4b31b80 100644 --- a/vendor/github.com/containers/storage/types/utils.go +++ b/vendor/github.com/containers/storage/types/utils.go @@ -2,162 +2,15 @@ package types import ( "errors" - "fmt" "os" "path/filepath" "strconv" "strings" "github.com/containers/storage/pkg/homedir" - "github.com/containers/storage/pkg/system" "github.com/sirupsen/logrus" ) -// GetRootlessRuntimeDir returns the runtime directory when running as non root -func GetRootlessRuntimeDir(rootlessUID int) (string, error) { - path, err := getRootlessRuntimeDir(rootlessUID) - if err != nil { - return "", err - } - path = filepath.Join(path, "containers") - if err := os.MkdirAll(path, 0o700); err != nil { - return "", fmt.Errorf("unable to make rootless runtime: %w", 
err) - } - return path, nil -} - -type rootlessRuntimeDirEnvironment interface { - getProcCommandFile() string - getRunUserDir() string - getTmpPerUserDir() string - - homeDirGetRuntimeDir() (string, error) - systemLstat(string) (*system.StatT, error) - homedirGet() string -} - -type rootlessRuntimeDirEnvironmentImplementation struct { - procCommandFile string - runUserDir string - tmpPerUserDir string -} - -func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string { - return env.procCommandFile -} - -func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string { - return env.runUserDir -} - -func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string { - return env.tmpPerUserDir -} - -func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) { - return homedir.GetRuntimeDir() -} - -func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) { - return system.Lstat(path) -} - -func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string { - return homedir.Get() -} - -func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool { - st, err := env.systemLstat(dir) - return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0o700 == 0o700 && st.Mode()&0o066 == 0o000 -} - -// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing. -// Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function. -func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) { - runtimeDir, err := env.homeDirGetRuntimeDir() - if err == nil { - return runtimeDir, nil - } - - initCommand, err := os.ReadFile(env.getProcCommandFile()) - if err != nil || string(initCommand) == "systemd" { - runUserDir := env.getRunUserDir() - if isRootlessRuntimeDirOwner(runUserDir, env) { - return runUserDir, nil - } - } - - tmpPerUserDir := env.getTmpPerUserDir() - if tmpPerUserDir != "" { - if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) { - if err := os.Mkdir(tmpPerUserDir, 0o700); err != nil { - logrus.Errorf("Failed to create temp directory for user: %v", err) - } else { - return tmpPerUserDir, nil - } - } else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) { - return tmpPerUserDir, nil - } - } - - homeDir := env.homedirGet() - if homeDir == "" { - return "", errors.New("neither XDG_RUNTIME_DIR nor temp dir nor HOME was set non-empty") - } - resolvedHomeDir, err := filepath.EvalSymlinks(homeDir) - if err != nil { - return "", err - } - return filepath.Join(resolvedHomeDir, "rundir"), nil -} - -func getRootlessRuntimeDir(rootlessUID int) (string, error) { - return getRootlessRuntimeDirIsolated( - rootlessRuntimeDirEnvironmentImplementation{ - "/proc/1/comm", - fmt.Sprintf("/run/user/%d", rootlessUID), - fmt.Sprintf("%s/containers-user-%d", os.TempDir(), rootlessUID), - }, - ) -} - -// getRootlessDirInfo returns the parent path of where the storage for containers and -// volumes will be in rootless mode -func getRootlessDirInfo(rootlessUID int) (string, string, error) { - rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID) - if err != nil { - return "", "", err - } - - dataDir, err := homedir.GetDataHome() - if err == nil { - return dataDir, rootlessRuntime, nil - } - - home := homedir.Get() - if home == "" { - return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty: %w", err) - } - // runc doesn't 
like symlinks in the rootfs path, and at least - // on CoreOS /home is a symlink to /var/home, so resolve any symlink. - resolvedHome, err := filepath.EvalSymlinks(home) - if err != nil { - return "", "", err - } - dataDir = filepath.Join(resolvedHome, ".local", "share") - - return dataDir, rootlessRuntime, nil -} - -func getRootlessUID() int { - uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") - if uidEnv != "" { - u, _ := strconv.Atoi(uidEnv) - return u - } - return os.Geteuid() -} - func expandEnvPath(path string, rootlessUID int) (string, error) { var err error path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1) @@ -169,7 +22,7 @@ func expandEnvPath(path string, rootlessUID int) (string, error) { return newpath, nil } -func DefaultConfigFile(rootless bool) (string, error) { +func DefaultConfigFile() (string, error) { if defaultConfigFileSet { return defaultConfigFile, nil } @@ -177,7 +30,7 @@ func DefaultConfigFile(rootless bool) (string, error) { if path, ok := os.LookupEnv(storageConfEnv); ok { return path, nil } - if !rootless { + if !usePerUserStorage() { if _, err := os.Stat(defaultOverrideConfigFile); err == nil { return defaultOverrideConfigFile, nil } diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go index 32ae830be..57120731b 100644 --- a/vendor/github.com/containers/storage/userns.go +++ b/vendor/github.com/containers/storage/userns.go @@ -11,7 +11,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" - libcontainerUser "github.com/opencontainers/runc/libcontainer/user" + libcontainerUser "github.com/moby/sys/user" "github.com/sirupsen/logrus" ) @@ -175,7 +175,7 @@ outer: // We need to create a temporary layer so we can mount it and lookup the // maximum IDs used. 
- clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil) + clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil, nil) if err != nil { return 0, err } diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go index 6b5a3421a..5bade6ffe 100644 --- a/vendor/github.com/containers/storage/utils.go +++ b/vendor/github.com/containers/storage/utils.go @@ -11,19 +11,9 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri return types.ParseIDMapping(UIDMapSlice, GIDMapSlice, subUIDMap, subGIDMap) } -// GetRootlessRuntimeDir returns the runtime directory when running as non root -func GetRootlessRuntimeDir(rootlessUID int) (string, error) { - return types.GetRootlessRuntimeDir(rootlessUID) -} - -// DefaultStoreOptionsAutoDetectUID returns the default storage options for containers -func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) { - return types.DefaultStoreOptionsAutoDetectUID() -} - // DefaultStoreOptions returns the default storage options for containers -func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) { - return types.DefaultStoreOptions(rootless, rootlessUID) +func DefaultStoreOptions() (types.StoreOptions, error) { + return types.DefaultStoreOptions() } func validateMountOptions(mountOptions []string) error { diff --git a/vendor/github.com/docker/distribution/reference/helpers_deprecated.go b/vendor/github.com/docker/distribution/reference/helpers_deprecated.go deleted file mode 100644 index cbd119250..000000000 --- a/vendor/github.com/docker/distribution/reference/helpers_deprecated.go +++ /dev/null @@ -1,34 +0,0 @@ -package reference - -import "github.com/distribution/reference" - -// IsNameOnly returns true if reference only contains a repo name. -// -// Deprecated: use [reference.IsNameOnly]. -func IsNameOnly(ref reference.Named) bool { - return reference.IsNameOnly(ref) -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -// -// Deprecated: use [reference.FamiliarName]. -func FamiliarName(ref reference.Named) string { - return reference.FamiliarName(ref) -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -// -// Deprecated: use [reference.FamiliarString]. -func FamiliarString(ref reference.Reference) string { - return reference.FamiliarString(ref) -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See [path.Match] for supported patterns. -// -// Deprecated: use [reference.FamiliarMatch]. -func FamiliarMatch(pattern string, ref reference.Reference) (bool, error) { - return reference.FamiliarMatch(pattern, ref) -} diff --git a/vendor/github.com/docker/distribution/reference/normalize_deprecated.go b/vendor/github.com/docker/distribution/reference/normalize_deprecated.go deleted file mode 100644 index 1b4a459d7..000000000 --- a/vendor/github.com/docker/distribution/reference/normalize_deprecated.go +++ /dev/null @@ -1,92 +0,0 @@ -package reference - -import ( - "regexp" - - "github.com/distribution/reference" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/go-digest/digestset" -) - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. 
-// -// Deprecated: use [reference.ParseNormalizedNamed]. -func ParseNormalizedNamed(s string) (reference.Named, error) { - return reference.ParseNormalizedNamed(s) -} - -// ParseDockerRef normalizes the image reference following the docker convention, -// which allows for references to contain both a tag and a digest. -// -// Deprecated: use [reference.ParseDockerRef]. -func ParseDockerRef(ref string) (reference.Named, error) { - return reference.ParseDockerRef(ref) -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -// -// Deprecated: use [reference.TagNameOnly]. -func TagNameOnly(ref reference.Named) reference.Named { - return reference.TagNameOnly(ref) -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -// -// Deprecated: use [reference.ParseAnyReference]. -func ParseAnyReference(ref string) (reference.Reference, error) { - return reference.ParseAnyReference(ref) -} - -// Functions and types below have been removed in distribution v3 and -// have not been ported to github.com/distribution/reference. See -// https://github.com/distribution/distribution/pull/3774 - -var ( - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - // - // Deprecated: support for short-identifiers is deprecated, and will be removed in v3. - ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier) - - shortIdentifier = `([a-f0-9]{6,64})` - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = regexp.MustCompile(`^` + shortIdentifier + `$`) -) - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -// ParseAnyReferenceWithSet parses a reference string as a possible short -// identifier to be matched in a digest set, a full digest, or familiar name. -// -// Deprecated: support for short-identifiers is deprecated, and will be removed in v3. -func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return reference.ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/reference_deprecated.go b/vendor/github.com/docker/distribution/reference/reference_deprecated.go deleted file mode 100644 index 5b732498e..000000000 --- a/vendor/github.com/docker/distribution/reference/reference_deprecated.go +++ /dev/null @@ -1,172 +0,0 @@ -// Package reference is deprecated, and has moved to github.com/distribution/reference. -// -// Deprecated: use github.com/distribution/reference instead. -package reference - -import ( - "github.com/distribution/reference" - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - // - // Deprecated: use [reference.NameTotalLengthMax]. 
- NameTotalLengthMax = reference.NameTotalLengthMax -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - // - // Deprecated: use [reference.ErrReferenceInvalidFormat]. - ErrReferenceInvalidFormat = reference.ErrReferenceInvalidFormat - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - // - // Deprecated: use [reference.ErrTagInvalidFormat]. - ErrTagInvalidFormat = reference.ErrTagInvalidFormat - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - // - // Deprecated: use [reference.ErrDigestInvalidFormat]. - ErrDigestInvalidFormat = reference.ErrDigestInvalidFormat - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - // - // Deprecated: use [reference.ErrNameContainsUppercase]. - ErrNameContainsUppercase = reference.ErrNameContainsUppercase - - // ErrNameEmpty is returned for empty, invalid repository names. - // - // Deprecated: use [reference.ErrNameEmpty]. - ErrNameEmpty = reference.ErrNameEmpty - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - // - // Deprecated: use [reference.ErrNameTooLong]. - ErrNameTooLong = reference.ErrNameTooLong - - // ErrNameNotCanonical is returned when a name is not canonical. - // - // Deprecated: use [reference.ErrNameNotCanonical]. - ErrNameNotCanonical = reference.ErrNameNotCanonical -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -// -// Deprecated: use [reference.Reference]. -type Reference = reference.Reference - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -// -// Deprecated: use [reference.Field]. -type Field = reference.Field - -// AsField wraps a reference in a Field for encoding. -// -// Deprecated: use [reference.AsField]. -func AsField(ref reference.Reference) reference.Field { - return reference.AsField(ref) -} - -// Named is an object with a full name -// -// Deprecated: use [reference.Named]. -type Named = reference.Named - -// Tagged is an object which has a tag -// -// Deprecated: use [reference.Tagged]. -type Tagged = reference.Tagged - -// NamedTagged is an object including a name and tag. -// -// Deprecated: use [reference.NamedTagged]. -type NamedTagged reference.NamedTagged - -// Digested is an object which has a digest -// in which it can be referenced by -// -// Deprecated: use [reference.Digested]. -type Digested reference.Digested - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -// -// Deprecated: use [reference.Canonical]. -type Canonical reference.Canonical - -// Domain returns the domain part of the [Named] reference. -// -// Deprecated: use [reference.Domain]. -func Domain(named reference.Named) string { - return reference.Domain(named) -} - -// Path returns the name without the domain part of the [Named] reference. -// -// Deprecated: use [reference.Path]. -func Path(named reference.Named) (name string) { - return reference.Path(named) -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// -// Deprecated: Use [reference.Domain] or [reference.Path]. 
-func SplitHostname(named reference.Named) (string, string) { - return reference.SplitHostname(named) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// -// Deprecated: use [reference.Parse]. -func Parse(s string) (reference.Reference, error) { - return reference.Parse(s) -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// -// Deprecated: use [reference.ParseNamed]. -func ParseNamed(s string) (reference.Named, error) { - return reference.ParseNamed(s) -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -// -// Deprecated: use [reference.WithName]. -func WithName(name string) (reference.Named, error) { - return reference.WithName(name) -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -// -// Deprecated: use [reference.WithTag]. -func WithTag(name reference.Named, tag string) (reference.NamedTagged, error) { - return reference.WithTag(name, tag) -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -// -// Deprecated: use [reference.WithDigest]. -func WithDigest(name reference.Named, digest digest.Digest) (reference.Canonical, error) { - return reference.WithDigest(name, digest) -} - -// TrimNamed removes any tag or digest from the named reference. -// -// Deprecated: use [reference.TrimNamed]. -func TrimNamed(ref reference.Named) reference.Named { - return reference.TrimNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/regexp_deprecated.go b/vendor/github.com/docker/distribution/reference/regexp_deprecated.go deleted file mode 100644 index 4b9c1b58e..000000000 --- a/vendor/github.com/docker/distribution/reference/regexp_deprecated.go +++ /dev/null @@ -1,50 +0,0 @@ -package reference - -import ( - "github.com/distribution/reference" -) - -// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:"). -// -// Deprecated: use [reference.DigestRegexp]. -var DigestRegexp = reference.DigestRegexp - -// DomainRegexp matches hostname or IP-addresses, optionally including a port -// number. It defines the structure of potential domain components that may be -// part of image names. This is purposely a subset of what is allowed by DNS to -// ensure backwards compatibility with Docker image names. It may be a subset of -// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between -// square brackets (excluding zone identifiers as defined by [RFC 6874] or special -// addresses such as IPv4-Mapped). -// -// Deprecated: use [reference.DomainRegexp]. -// -// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874. -var DomainRegexp = reference.DigestRegexp - -// IdentifierRegexp is the format for string identifier used as a -// content addressable identifier using sha256. These identifiers -// are like digests without the algorithm, since sha256 is used. -// -// Deprecated: use [reference.IdentifierRegexp]. 
-var IdentifierRegexp = reference.IdentifierRegexp - -// NameRegexp is the format for the name component of references, including -// an optional domain and port, but without tag or digest suffix. -// -// Deprecated: use [reference.NameRegexp]. -var NameRegexp = reference.NameRegexp - -// ReferenceRegexp is the full supported format of a reference. The regexp -// is anchored and has capturing groups for name, tag, and digest -// components. -// -// Deprecated: use [reference.ReferenceRegexp]. -var ReferenceRegexp = reference.ReferenceRegexp - -// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go]. -// -// Deprecated: use [reference.TagRegexp]. -// -// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28 -var TagRegexp = reference.TagRegexp diff --git a/vendor/github.com/docker/distribution/reference/sort_deprecated.go b/vendor/github.com/docker/distribution/reference/sort_deprecated.go deleted file mode 100644 index a73251b6f..000000000 --- a/vendor/github.com/docker/distribution/reference/sort_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package reference - -import "github.com/distribution/reference" - -// Sort sorts string references preferring higher information references. -// -// Deprecated: use [reference.Sort]. -func Sort(references []string) []string { - return reference.Sort(references) -} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go index 678153cf8..7ca5ab722 100644 --- a/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -16,11 +16,9 @@ func isValidCredsMessage(msg string) error { if credentials.IsCredentialsMissingServerURLMessage(msg) { return credentials.NewErrCredentialsMissingServerURL() } - if credentials.IsCredentialsMissingUsernameMessage(msg) { return credentials.NewErrCredentialsMissingUsername() } - return nil } @@ -36,13 +34,10 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error { out, err := cmd.Output() if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { + if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { err = isValidErr } - - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) + return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) } return nil @@ -55,17 +50,15 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error out, err := cmd.Output() if err != nil { - t := strings.TrimSpace(string(out)) - - if credentials.IsErrCredentialsNotFoundMessage(t) { + if credentials.IsErrCredentialsNotFoundMessage(string(out)) { return nil, credentials.NewErrCredentialsNotFound() } - if isValidErr := isValidCredsMessage(t); isValidErr != nil { + if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { err = isValidErr } - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) } resp := &credentials.Credentials{ diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go index 8fa4d5d25..2283d5a44 100644 --- 
a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -1,6 +1,9 @@ package credentials -import "errors" +import ( + "errors" + "strings" +) const ( // ErrCredentialsNotFound standardizes the not found error, so every helper returns @@ -47,7 +50,7 @@ func IsErrCredentialsNotFound(err error) bool { // This function helps to check messages returned by an // external program via its standard output. func IsErrCredentialsNotFoundMessage(err string) bool { - return err == errCredentialsNotFoundMessage + return strings.TrimSpace(err) == errCredentialsNotFoundMessage } // errCredentialsMissingServerURL represents an error raised @@ -104,7 +107,7 @@ func IsCredentialsMissingServerURL(err error) bool { // IsCredentialsMissingServerURLMessage checks for an // errCredentialsMissingServerURL in the error message. func IsCredentialsMissingServerURLMessage(err string) bool { - return err == errCredentialsMissingServerURLMessage + return strings.TrimSpace(err) == errCredentialsMissingServerURLMessage } // IsCredentialsMissingUsername returns true if the error @@ -117,5 +120,5 @@ func IsCredentialsMissingUsername(err error) bool { // IsCredentialsMissingUsernameMessage checks for an // errCredentialsMissingUsername in the error message. func IsCredentialsMissingUsernameMessage(err string) bool { - return err == errCredentialsMissingUsernameMessage + return strings.TrimSpace(err) == errCredentialsMissingUsernameMessage } diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index b31418192..48d04f9a9 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -27,6 +27,7 @@ Adam Miller Adam Mills Adam Pointer Adam Singer +Adam Thornton Adam Walz Adam Williams AdamKorcz @@ -173,6 +174,7 @@ Andy Rothfusz Andy Smith Andy Wilson Andy Zhang +Aneesh Kulkarni Anes Hasicic Angel Velazquez Anil Belur @@ -236,6 +238,7 @@ Ben Golub Ben Gould Ben Hall Ben Langfeld +Ben Lovy Ben Sargent Ben Severson Ben Toews @@ -262,7 +265,7 @@ Billy Ridgway Bily Zhang Bin Liu Bingshen Wang -Bjorn Neergaard +Bjorn Neergaard Blake Geno Boaz Shuster bobby abbott @@ -279,6 +282,7 @@ Brandon Liu Brandon Philips Brandon Rhodes Brendan Dixon +Brennan Kinney <5098581+polarathene@users.noreply.github.com> Brent Salisbury Brett Higgins Brett Kochendorfer @@ -363,6 +367,7 @@ chenyuzhu Chetan Birajdar Chewey Chia-liang Kao +Chiranjeevi Tirunagari chli Cholerae Hu Chris Alfonso @@ -433,8 +438,8 @@ Cristian Staretu cristiano balducci Cristina Yenyxe Gonzalez Garcia Cruceru Calin-Cristian +cui fliter CUI Wei -cuishuang Cuong Manh Le Cyprian Gracz Cyril F @@ -513,6 +518,7 @@ David Dooling David Gageot David Gebler David Glasser +David Karlsson <35727626+dvdksn@users.noreply.github.com> David Lawrence David Lechner David M. Karr @@ -602,6 +608,7 @@ Donald Huang Dong Chen Donghwa Kim Donovan Jones +Dorin Geman Doron Podoleanu Doug Davis Doug MacEachern @@ -636,6 +643,7 @@ Emily Rose Emir Ozer Eng Zer Jun Enguerran +Enrico Weigelt, metux IT consult Eohyung Lee epeterso er0k @@ -676,6 +684,7 @@ Evan Allrich Evan Carmi Evan Hazlett Evan Krall +Evan Lezar Evan Phoenix Evan Wies Evelyn Xu @@ -744,6 +753,7 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin +Frank Villaro-Dixon Frank Yang Fred Lifton Frederick F. 
Kautz IV @@ -983,6 +993,7 @@ Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido Jean-Christophe Berthon +Jean-Michel Rouet Jean-Paul Calderone Jean-Pierre Huynh Jean-Tiare Le Bigot @@ -1013,6 +1024,7 @@ Jeroen Jacobs Jesse Dearing Jesse Dubay Jessica Frazelle +Jeyanthinath Muthuram Jezeniel Zapanta Jhon Honce Ji.Zhilong @@ -1141,6 +1153,7 @@ junxu Jussi Nummelin Justas Brazauskas Justen Martin +Justin Chadwell Justin Cormack Justin Force Justin Keller <85903732+jk-vb@users.noreply.github.com> @@ -1183,6 +1196,7 @@ Ke Xu Kei Ohmura Keith Hudgins Keli Hu +Ken Bannister Ken Cochrane Ken Herner Ken ICHIKAWA @@ -1192,7 +1206,7 @@ Kenjiro Nakayama Kent Johnson Kenta Tada Kevin "qwazerty" Houdebert -Kevin Alvarez +Kevin Alvarez Kevin Burke Kevin Clark Kevin Feyrer @@ -1225,6 +1239,7 @@ Konstantin Gribov Konstantin L Konstantin Pelykh Kostadin Plachkov +kpcyrd Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister @@ -1306,6 +1321,7 @@ Lorenzo Fontana Lotus Fenn Louis Delossantos Louis Opter +Luboslav Pivarc Luca Favatella Luca Marturana Luca Orlandi @@ -1344,6 +1360,7 @@ Manuel Meurer Manuel Rüger Manuel Woelker mapk0y +Marat Radchenko Marc Abramowitz Marc Kuo Marc Tamsky @@ -1383,6 +1400,7 @@ Martijn van Oosterhout Martin Braun Martin Dojcak Martin Honermeyer +Martin Jirku Martin Kelly Martin Mosegaard Amdisen Martin Muzatko @@ -1461,6 +1479,7 @@ Michael Holzheu Michael Hudson-Doyle Michael Huettermann Michael Irwin +Michael Kebe Michael Kuehn Michael Käufl Michael Neale @@ -1509,10 +1528,11 @@ Mike Lundy Mike MacCana Mike Naberezny Mike Snitzer +Mike Sul mikelinjie <294893458@qq.com> Mikhail Sobolev Miklos Szegedi -Milas Bowman +Milas Bowman Milind Chawre Miloslav Trmač mingqing @@ -1524,6 +1544,7 @@ mlarcher Mohammad Banikazemi Mohammad Nasirifar Mohammed Aaqib Ansari +Mohd Sadiq Mohit Soni Moorthy RS Morgan Bauer @@ -1606,6 +1627,7 @@ Noah Treuhaft NobodyOnSE noducks Nolan Darilek +Nolan Miles Noriki Nakamura nponeccop Nurahmadie @@ -1661,6 +1683,7 @@ Paul Lietar Paul Liljenberg Paul Morie Paul Nasrat +Paul Seiffert Paul Weaver Paulo Gomes Paulo Ribeiro @@ -1674,6 +1697,7 @@ Pavlos Ratis Pavol Vargovcik Pawel Konczalski Paweł Gronowski +payall4u Peeyush Gupta Peggy Li Pei Su @@ -1703,7 +1727,9 @@ Phil Estes Phil Sphicas Phil Spitler Philip Alexander Etling +Philip K. Warren Philip Monroe +Philipp Fruck Philipp Gillé Philipp Wahala Philipp Weissensteiner @@ -1741,6 +1767,7 @@ Quentin Brossard Quentin Perez Quentin Tayssier r0n22 +Rachit Sharma Radostin Stoyanov Rafal Jeczalik Rafe Colton @@ -1773,6 +1800,7 @@ Rich Horwood Rich Moyse Rich Seymour Richard Burnison +Richard Hansen Richard Harvey Richard Mathie Richard Metzler @@ -1788,6 +1816,7 @@ Ritesh H Shukla Riyaz Faizullabhoy Rob Cowsill <42620235+rcowsill@users.noreply.github.com> Rob Gulewich +Rob Murray Rob Vesse Robert Bachmann Robert Bittle @@ -1869,6 +1898,7 @@ ryancooper7 RyanDeng Ryo Nakao Ryoga Saito +Régis Behmo Rémy Greinhofer s. rannou Sabin Basyal @@ -1885,6 +1915,7 @@ Sam J Sharpe Sam Neirinck Sam Reis Sam Rijs +Sam Thibault Sam Whited Sambuddha Basu Sami Wagiaalla @@ -1908,6 +1939,7 @@ Satoshi Tagomori Scott Bessler Scott Collier Scott Johnston +Scott Moser Scott Percival Scott Stamp Scott Walls @@ -1923,6 +1955,7 @@ Sebastiaan van Steenis Sebastiaan van Stijn Sebastian Höffner Sebastian Radloff +Sebastian Thomschke Sebastien Goasguen Senthil Kumar Selvaraj Senthil Kumaran @@ -1996,6 +2029,7 @@ Stanislav Bondarenko Stanislav Levin Steeve Morin Stefan Berger +Stefan Gehrig Stefan J. Wernli Stefan Praszalowicz Stefan S. 
@@ -2003,6 +2037,7 @@ Stefan Scherer Stefan Staudenmeyer Stefan Weil Steffen Butzer +Stephan Henningsen Stephan Spindler Stephen Benjamin Stephen Crosby @@ -2204,6 +2239,7 @@ Vinod Kulkarni Vishal Doshi Vishnu Kannan Vitaly Ostrosablin +Vitor Anjos Vitor Monteiro Vivek Agarwal Vivek Dasgupta @@ -2250,6 +2286,7 @@ Wenxuan Zhao Wenyu You <21551128@zju.edu.cn> Wenzhi Liang Wes Morgan +Wesley Pettit Wewang Xiaorenfine Wiktor Kwapisiewicz Will Dietz @@ -2289,7 +2326,7 @@ xiekeyang Ximo Guanter Gonzálbez xin.li Xinbo Weng -Xinfeng Liu +Xinfeng Liu Xinzi Zhou Xiuming Chen Xuecong Liao @@ -2355,6 +2392,7 @@ Zen Lin(Zhinan Lin) Zhang Kun Zhang Wei Zhang Wentao +zhangguanzhang ZhangHang zhangxianwei Zhenan Ye <21551168@zju.edu.cn> @@ -2381,6 +2419,7 @@ Zuhayr Elahi Zunayed Ali Álvaro Lázaro Átila Camurça Alves +吴小白 <296015668@qq.com> 尹吉峰 屈骏 徐俊杰 diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md index f136c3433..381f19881 100644 --- a/vendor/github.com/docker/docker/api/README.md +++ b/vendor/github.com/docker/docker/api/README.md @@ -37,6 +37,6 @@ There is hopefully enough example material in the file for you to copy a similar When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. -Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. +Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index cba66bc46..37e553d41 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion = "1.43" + DefaultVersion = "1.44" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go deleted file mode 100644 index 19fc63d65..000000000 --- a/vendor/github.com/docker/docker/api/common_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !windows -// +build !windows - -package api // import "github.com/docker/docker/api" - -// MinVersion represents Minimum REST API version supported -const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go deleted file mode 100644 index 590ba5479..000000000 --- a/vendor/github.com/docker/docker/api/common_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package api // import "github.com/docker/docker/api" - -// MinVersion represents Minimum REST API version supported -// Technically the first daemon API version released on Windows is v1.25 in -// engine version 1.13. However, some clients are explicitly using downlevel -// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. -// Hence also allowing 1.24 on Windows. 
-const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 7635b9f66..e55a76fc6 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.43" +basePath: "/v1.44" info: title: "Docker Engine API" - version: "1.43" + version: "1.44" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.43) is used. - For example, calling `/info` is the same as calling `/v1.43/info`. Using the + If you omit the version-prefix, the current version of the API (v1.44) is used. + For example, calling `/info` is the same as calling `/v1.44/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -388,6 +388,16 @@ definitions: description: "Create mount point on host if missing" type: "boolean" default: false + ReadOnlyNonRecursive: + description: | + Make the mount non-recursively read-only, but still leave the mount recursive + (unless NonRecursive is set to true in conjunction). + type: "boolean" + default: false + ReadOnlyForceRecursive: + description: "Raise an error if the mount cannot be made recursively read-only." + type: "boolean" + default: false VolumeOptions: description: "Optional configuration for the `volume` type." type: "object" @@ -794,6 +804,12 @@ definitions: 1000000 (1 ms). 0 means inherit. type: "integer" format: "int64" + StartInterval: + description: | + The time to wait between checks in nanoseconds during the start period. + It should be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" Health: description: | @@ -1297,7 +1313,10 @@ definitions: type: "boolean" x-nullable: true MacAddress: - description: "MAC address of the container." + description: | + MAC address of the container. + + Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. type: "string" x-nullable: true OnBuild: @@ -1347,16 +1366,16 @@ definitions: EndpointsConfig: description: | A mapping of network name to endpoint configuration for that network. + The endpoint configuration can be left empty to connect to that + network with no particular endpoint configuration. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" example: # putting an example here, instead of using the example values from - # /definitions/EndpointSettings, because containers/create currently - # does not support attaching to multiple networks, so the example request - # would be confusing if it showed that multiple networks can be contained - # in the EndpointsConfig. - # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) + # /definitions/EndpointSettings, because EndpointSettings contains + # operational data returned when inspecting a container that we don't + # accept here. 
EndpointsConfig: isolated_nw: IPAMConfig: @@ -1365,19 +1384,22 @@ definitions: LinkLocalIPs: - "169.254.34.68" - "fe80::3468" + MacAddress: "02:42:ac:12:05:02" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" + database_nw: {} NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: - description: Name of the network's bridge (for example, `docker0`). + description: | + Name of the default bridge interface when dockerd's --bridge flag is set. type: "string" example: "docker0" SandboxID: @@ -1387,34 +1409,40 @@ definitions: HairpinMode: description: | Indicates if hairpin NAT should be enabled on the virtual interface. + + Deprecated: This field is never set and will be removed in a future release. type: "boolean" example: false LinkLocalIPv6Address: - description: IPv6 unicast address using the link-local prefix. + description: | + IPv6 unicast address using the link-local prefix. + + Deprecated: This field is never set and will be removed in a future release. type: "string" - example: "fe80::42:acff:fe11:1" + example: "" LinkLocalIPv6PrefixLen: - description: Prefix length of the IPv6 unicast address. + description: | + Prefix length of the IPv6 unicast address. + + Deprecated: This field is never set and will be removed in a future release. type: "integer" - example: "64" + example: "" Ports: $ref: "#/definitions/PortMap" SandboxKey: - description: SandboxKey identifies the sandbox + description: SandboxKey is the full path of the netns handle type: "string" example: "/var/run/docker/netns/8ab54b426c38" - # TODO is SecondaryIPAddresses actually used? SecondaryIPAddresses: - description: "" + description: "Deprecated: This field is never set and will be removed in a future release." type: "array" items: $ref: "#/definitions/Address" x-nullable: true - # TODO is SecondaryIPv6Addresses actually used? SecondaryIPv6Addresses: - description: "" + description: "Deprecated: This field is never set and will be removed in a future release." type: "array" items: $ref: "#/definitions/Address" @@ -1723,10 +1751,15 @@ definitions: The ID of the container that was used to create the image. Depending on how the image was created, this field may be empty. + + **Deprecated**: this field is kept for backward compatibility, but + will be removed in API v1.45. type: "string" - x-nullable: false example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" ContainerConfig: + description: | + **Deprecated**: this field is kept for backward compatibility, but + will be removed in API v1.45. $ref: "#/definitions/ContainerConfig" DockerVersion: description: | @@ -1781,13 +1814,7 @@ definitions: description: | Total size of the image including all layers it is composed of. - In versions of Docker before v1.10, this field was calculated from - the image itself and all of its parent images. Images are now stored - self-contained, and no longer use a parent-chain, making this field - an equivalent of the Size field. - - > **Deprecated**: this field is kept for backward compatibility, but - > will be removed in API v1.44. + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. type: "integer" format: "int64" example: 1239828 @@ -1829,6 +1856,7 @@ definitions: x-nullable: true ImageSummary: type: "object" + x-go-name: "Summary" required: - Id - ParentId @@ -1925,12 +1953,7 @@ definitions: description: |- Total size of the image including all layers it is composed of. 
- In versions of Docker before v1.10, this field was calculated from - the image itself and all of its parent images. Images are now stored - self-contained, and no longer use a parent-chain, making this field - an equivalent of the Size field. - - Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44. + Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. type: "integer" format: "int64" example: 172064416 @@ -2448,6 +2471,11 @@ definitions: example: - "container_1" - "container_2" + MacAddress: + description: | + MAC address for the endpoint on this network. The network driver might ignore this parameter. + type: "string" + example: "02:42:ac:11:00:04" Aliases: type: "array" items: @@ -2498,11 +2526,6 @@ definitions: type: "integer" format: "int64" example: 64 - MacAddress: - description: | - MAC address for the endpoint on this network. - type: "string" - example: "02:42:ac:11:00:04" DriverOpts: description: | DriverOpts is a mapping of driver options and values. These options @@ -2514,6 +2537,21 @@ definitions: example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" + DNSNames: + description: | + List of all DNS names an endpoint has on a specific network. This + list is based on the container name, network aliases, container short + ID, and hostname. + + These DNS names are non-fully qualified but can contain several dots. + You can get fully qualified DNS names by appending `.`. + For instance, if container name is `my.ctr` and the network is named + `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be + `my.ctr.testnet`. + type: array + items: + type: string + example: ["foobar", "server_x", "server_y", "my.ctr"] EndpointIPAMConfig: description: | @@ -3011,8 +3049,6 @@ definitions: Name: "journald" - Type: "Log" Name: "json-file" - - Type: "Log" - Name: "logentries" - Type: "Log" Name: "splunk" - Type: "Log" @@ -3547,6 +3583,32 @@ definitions: Level: type: "string" description: "SELinux level label" + Seccomp: + type: "object" + description: "Options for configuring seccomp on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "unconfined" + - "custom" + Profile: + description: "The custom seccomp profile as a json object" + type: "string" + AppArmor: + type: "object" + description: "Options for configuring AppArmor on the container" + properties: + Mode: + type: "string" + enum: + - "default" + - "disabled" + NoNewPrivileges: + type: "boolean" + description: "Configuration of the no_new_privs bit in the container" + TTY: description: "Whether a pseudo-TTY should be allocated." type: "boolean" @@ -3941,6 +4003,44 @@ definitions: - "remove" - "orphaned" + ContainerStatus: + type: "object" + description: "represents the status of a container." + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + + PortStatus: + type: "object" + description: "represents the port status of a task's host ports whose service has published host ports" + properties: + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + TaskStatus: + type: "object" + description: "represents the status of a task." 
+ properties:
+ Timestamp:
+ type: "string"
+ format: "dateTime"
+ State:
+ $ref: "#/definitions/TaskState"
+ Message:
+ type: "string"
+ Err:
+ type: "string"
+ ContainerStatus:
+ $ref: "#/definitions/ContainerStatus"
+ PortStatus:
+ $ref: "#/definitions/PortStatus"
+
 Task:
 type: "object"
 properties:
@@ -3976,26 +4076,7 @@ definitions:
 AssignedGenericResources:
 $ref: "#/definitions/GenericResources"
 Status:
- type: "object"
- properties:
- Timestamp:
- type: "string"
- format: "dateTime"
- State:
- $ref: "#/definitions/TaskState"
- Message:
- type: "string"
- Err:
- type: "string"
- ContainerStatus:
- type: "object"
- properties:
- ContainerID:
- type: "string"
- PID:
- type: "integer"
- ExitCode:
- type: "integer"
+ $ref: "#/definitions/TaskStatus"
 DesiredState:
 $ref: "#/definitions/TaskState"
 JobIteration:
@@ -4211,7 +4292,10 @@ definitions:
 - "stop-first"
 - "start-first"
 Networks:
- description: "Specifies which networks the service should attach to."
+ description: |
+ Specifies which networks the service should attach to.
+
+ Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead.
 type: "array"
 items:
 $ref: "#/definitions/NetworkAttachmentConfig"
@@ -4445,6 +4529,7 @@ definitions:
 ImageDeleteResponseItem:
 type: "object"
+ x-go-name: "DeleteResponse"
 properties:
 Untagged:
 description: "The image ID of an image that was untagged"
 type: "string"
 Deleted:
 description: "The image ID of an image that was deleted"
 type: "string"
+ ServiceCreateResponse:
+ type: "object"
+ description: |
+ contains the information returned to a client on the
+ creation of a new service.
+ properties:
+ ID:
+ description: "The ID of the created service."
+ type: "string"
+ x-nullable: false
+ example: "ak7w3gjqoa3kuz8xcpnyy0pvl"
+ Warnings:
+ description: |
+ Optional warning message.
+
+ FIXME(thaJeztah): this should have "omitempty" in the generated type.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "string"
+ example:
+ - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+
 ServiceUpdateResponse:
 type: "object"
 properties:
@@ -4462,7 +4570,8 @@
 items:
 type: "string"
 example:
- Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+ Warnings:
+ - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
 ContainerSummary:
 type: "object"
@@ -5296,7 +5405,25 @@ definitions:
 - "WARNING: No memory limit support"
 - "WARNING: bridge-nf-call-iptables is disabled"
 - "WARNING: bridge-nf-call-ip6tables is disabled"
+ CDISpecDirs:
+ description: |
+ List of directories where CDI (Container Device Interface)
+ specifications are located.
+
+ These specifications define vendor-specific modifications to an OCI
+ runtime specification for a container being created.
+
+ An empty list indicates that CDI device injection is disabled.
+
+ Note that using CDI device injection requires the daemon to have
+ experimental enabled. For non-experimental daemons an empty list will
+ always be returned.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "/etc/cdi"
+ - "/var/run/cdi"

 # PluginsInfo is a temp struct holding Plugins name
 # registered with docker daemon.
It is used by Info struct @@ -5334,7 +5461,7 @@ definitions: type: "array" items: type: "string" - example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] RegistryServiceConfig: @@ -5532,6 +5659,28 @@ definitions: items: type: "string" example: ["--debug", "--systemd-cgroup=false"] + status: + description: | + Information specific to the runtime. + + While this API specification does not define data provided by runtimes, + the following well-known properties may be provided by runtimes: + + `org.opencontainers.runtime-spec.features`: features structure as defined + in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), + in a JSON string representation. + +


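To show how a consumer might use the well-known property described above, here is a minimal Go sketch; the `runtimeFeatures` struct and the `parseRuntimeStatus` helper are hypothetical names (not part of the vendored API), and only the two version fields from the example payload are decoded:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// featuresKey is the well-known property name from the spec text above.
const featuresKey = "org.opencontainers.runtime-spec.features"

// runtimeFeatures is a trimmed-down, hypothetical view of the OCI features
// document; real documents contain many more fields.
type runtimeFeatures struct {
	OCIVersionMin string `json:"ociVersionMin"`
	OCIVersionMax string `json:"ociVersionMax"`
}

// parseRuntimeStatus extracts the features document from a runtime's status
// map, returning nil when the runtime did not report one.
func parseRuntimeStatus(status map[string]string) (*runtimeFeatures, error) {
	raw, ok := status[featuresKey]
	if !ok {
		return nil, nil
	}
	var f runtimeFeatures
	if err := json.Unmarshal([]byte(raw), &f); err != nil {
		return nil, fmt.Errorf("invalid features document: %w", err)
	}
	return &f, nil
}

func main() {
	status := map[string]string{
		featuresKey: `{"ociVersionMin":"1.0.0","ociVersionMax":"1.1.0"}`,
	}
	f, err := parseRuntimeStatus(status)
	if err != nil {
		panic(err)
	}
	fmt.Printf("runtime supports OCI spec versions %s through %s\n", f.OCIVersionMin, f.OCIVersionMax)
}
```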
+
+ > **Note**: The information returned in this field, including the
+ > formatting of values and labels, should not be considered stable,
+ > and may change without notice.
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "string"
+ example:
+ "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}"

 Commit:
 description: |
@@ -6416,6 +6565,7 @@ paths:
 Aliases:
 - "server_x"
 - "server_y"
+ database_nw: {}
 required: true
 responses:
@@ -6563,7 +6713,7 @@ paths:
 StopSignal: "SIGTERM"
 StopTimeout: 10
 Created: "2015-01-06T15:47:31.485331387Z"
- Driver: "devicemapper"
+ Driver: "overlay2"
 ExecIDs:
 - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca"
 - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"
@@ -7994,6 +8144,7 @@ paths:
 - `label=key` or `label="key=value"` of an image label
 - `reference`=(`<image-name>[:<tag>]`)
 - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ - `until=<timestamp>`
 type: "string"
 - name: "shared-size"
 in: "query"
@@ -8176,6 +8327,16 @@ paths:
 description: "BuildKit output configuration"
 type: "string"
 default: ""
+ - name: "version"
+ in: "query"
+ type: "string"
+ default: "1"
+ enum: ["1", "2"]
+ description: |
+ Version of the builder backend to use.
+
+ - `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
+ - `2` is [BuildKit](https://github.com/moby/buildkit)
 responses:
 200:
 description: "no error"
@@ -8245,7 +8406,7 @@ paths:
 /images/create:
 post:
 summary: "Create an image"
- description: "Create an image by either pulling it from a registry or importing it."
+ description: "Pull or import an image."
 operationId: "ImageCreate"
 consumes:
 - "text/plain"
@@ -8596,28 +8757,36 @@ paths:
 is_official:
 type: "boolean"
 is_automated:
+ description: |
+ Whether this repository has automated builds enabled.
+


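Given that deprecation (see the note that follows), client code should treat `is_automated` as informational at best. A minimal Go sketch against the vendored `registry.SearchResult` type, with a hypothetical `printResult` helper that deliberately ignores `IsAutomated`:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

// printResult renders one search result. IsAutomated is deliberately not
// consulted: per the deprecation note, the field will always be "false"
// in future API versions.
func printResult(r registry.SearchResult) {
	badge := ""
	if r.IsOfficial {
		badge = " [official]"
	}
	fmt.Printf("%-10s %6d stars%s\n", r.Name, r.StarCount, badge)
}

func main() {
	printResult(registry.SearchResult{Name: "alpine", StarCount: 10093, IsOfficial: true})
	printResult(registry.SearchResult{Name: "postgres", StarCount: 12408, IsOfficial: true})
}
```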
+
+ > **Deprecated**: This field is deprecated and will always
+ > be "false" in the future.
 type: "boolean"
+ example: false
 name:
 type: "string"
 star_count:
 type: "integer"
 examples:
 application/json:
- - description: ""
- is_official: false
+ - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!"
+ is_official: true
 is_automated: false
- name: "wma55/u1210sshd"
- star_count: 0
- - description: ""
- is_official: false
+ name: "alpine"
+ star_count: 10093
+ - description: "Busybox base image."
+ is_official: true
 is_automated: false
- name: "jdswinbank/sshd"
- star_count: 0
- - description: ""
- is_official: false
+ name: "busybox"
+ star_count: 3037
+ - description: "The PostgreSQL object-relational database system provides reliability and data integrity."
+ is_official: true
 is_automated: false
- name: "vgauthier/sshd"
- star_count: 0
+ name: "postgres"
+ star_count: 12408
 500:
 description: "Server error"
 schema:
@@ -8637,9 +8806,13 @@
 description: |
 A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
- - `is-automated=(true|false)`
+ - `is-automated=(true|false)` (deprecated, see below)
 - `is-official=(true|false)`
 - `stars=<number>` Matches images that have at least 'number' stars.
+
+ The `is-automated` filter is deprecated. The `is_automated` field has
+ been deprecated by Docker Hub's search API. Consequently, searching
+ for `is-automated=true` will yield no results.
 type: "string"
 tags: ["Image"]
 /images/prune:
@@ -9032,7 +9205,6 @@
 Created: 1466724217
 Size: 1092588
 SharedSize: 0
- VirtualSize: 1092588
 Labels: {}
 Containers: 1
 Containers:
@@ -9895,6 +10067,10 @@
 example:
 Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30"
 Warning: ""
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
 403:
 description: |
 Forbidden operation. This happens when trying to create a network named after a pre-defined network,
@@ -9924,13 +10100,7 @@
 type: "string"
 CheckDuplicate:
 description: |
- Check for networks with duplicate names. Since Network is
- primarily keyed based on a random ID and not on the name, and
- network name is strictly a user-friendly alias to the network
- which is uniquely identified using ID, there is no guaranteed
- way to check for duplicates. CheckDuplicate is there to provide
- a best effort checking of any networks which has the same name
- but it is not guaranteed to catch all name collisions.
+ Deprecated: CheckDuplicate is now always enabled.
 type: "boolean"
 Driver:
 description: "Name of the network driver plugin to use."
@@ -9998,14 +10168,19 @@
 /networks/{id}/connect:
 post:
 summary: "Connect a container to a network"
+ description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set.
A network cannot be re-attached to a running container" operationId: "NetworkConnect" consumes: - "application/json" responses: 200: description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 403: - description: "Operation not supported for swarm scoped networks" + description: "Operation forbidden" schema: $ref: "#/definitions/ErrorResponse" 404: @@ -10040,6 +10215,7 @@ paths: IPAMConfig: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" + MacAddress: "02:42:ac:12:05:02" tags: ["Network"] /networks/{id}/disconnect: @@ -11033,18 +11209,7 @@ paths: 201: description: "no error" schema: - type: "object" - title: "ServiceCreateResponse" - properties: - ID: - description: "The ID of the created service." - type: "string" - Warning: - description: "Optional warning message" - type: "string" - example: - ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + $ref: "#/definitions/ServiceCreateResponse" 400: description: "bad parameter" schema: diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go deleted file mode 100644 index 9ee329a2f..000000000 --- a/vendor/github.com/docker/docker/api/types/auth.go +++ /dev/null @@ -1,7 +0,0 @@ -package types // import "github.com/docker/docker/api/types" -import "github.com/docker/docker/api/types/registry" - -// AuthConfig contains authorization information for connecting to a Registry. -// -// Deprecated: use github.com/docker/docker/api/types/registry.AuthConfig -type AuthConfig = registry.AuthConfig diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go new file mode 100644 index 000000000..613da5517 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/backend/backend.go @@ -0,0 +1,165 @@ +// Package backend includes types to send information to server backends. +package backend // import "github.com/docker/docker/api/types/backend" + +import ( + "io" + "time" + + "github.com/distribution/reference" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *ocispec.Platform + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. +type ContainerAttachConfig struct { + GetStreams func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys string + // Used to signify that streams must be multiplexed by producer as endpoint can't manage multiple streams. + // This is typically set by HTTP endpoint, while websocket can transport raw streams + MuxStreams bool +} + +// PartialLogMetaData provides meta data for a partial log message. Messages +// exceeding a predefined size are split into chunks with this metadata. 
The
+// expectation is for the logger endpoints to assemble the chunks using this
+// metadata.
+type PartialLogMetaData struct {
+ Last bool // true if this message is last of a partial
+ ID string // identifies group of messages comprising a single record
+ Ordinal int // ordering of message in partial group
+}
+
+// LogMessage is a data structure that represents a piece of output produced by some
+// container. The Line member is a slice of an array whose contents can be
+// changed after a log driver's Log() method returns.
+type LogMessage struct {
+ Line []byte
+ Source string
+ Timestamp time.Time
+ Attrs []LogAttr
+ PLogMetaData *PartialLogMetaData
+
+ // Err is an error associated with a message. Completeness of a message
+ // with Err is not expected, though it may be partially complete (fields may
+ // be missing, gibberish, or nil)
+ Err error
+}
+
+// LogAttr is used to hold the extra attributes available in the log message.
+type LogAttr struct {
+ Key string
+ Value string
+}
+
+// LogSelector is a list of services and tasks that should be returned as part
+// of a log stream. It is similar to swarmapi.LogSelector, with the difference
+// that the names don't have to be resolved to IDs; this is mostly to avoid
+// accidents later where a swarmapi LogSelector might have been incorrectly
+// used verbatim (and to avoid the handler having to import swarmapi types)
+type LogSelector struct {
+ Services []string
+ Tasks []string
+}
+
+// ContainerStatsConfig holds information for configuring the runtime
+// behavior of a backend.ContainerStats() call.
+type ContainerStatsConfig struct {
+ Stream bool
+ OneShot bool
+ OutStream io.Writer
+ Version string
+}
+
+// ExecInspect holds information about a running process started
+// with docker exec.
+type ExecInspect struct {
+ ID string
+ Running bool
+ ExitCode *int
+ ProcessConfig *ExecProcessConfig
+ OpenStdin bool
+ OpenStderr bool
+ OpenStdout bool
+ CanRemove bool
+ ContainerID string
+ DetachKeys []byte
+ Pid int
+}
+
+// ExecProcessConfig holds information about the exec process
+// running on the host.
+type ExecProcessConfig struct {
+ Tty bool `json:"tty"`
+ Entrypoint string `json:"entrypoint"`
+ Arguments []string `json:"arguments"`
+ Privileged *bool `json:"privileged,omitempty"`
+ User string `json:"user,omitempty"`
+}
+
+// CreateImageConfig is the configuration for creating an image from a
+// container.
+type CreateImageConfig struct {
+ Tag reference.NamedTagged
+ Pause bool
+ Author string
+ Comment string
+ Config *container.Config
+ Changes []string
+}
+
+// CommitConfig is the configuration for creating an image as part of a build.
+type CommitConfig struct {
+ Author string
+ Comment string
+ Config *container.Config
+ ContainerConfig *container.Config
+ ContainerID string
+ ContainerMountLabel string
+ ContainerOS string
+ ParentImageID string
+}
+
+// PluginRmConfig holds arguments for plugin remove.
+type PluginRmConfig struct {
+ ForceRemove bool
+}
+
+// PluginEnableConfig holds arguments for plugin enable.
+type PluginEnableConfig struct {
+ Timeout int
+}
+
+// PluginDisableConfig holds arguments for plugin disable.
+type PluginDisableConfig struct { + ForceDisable bool +} + +// NetworkListConfig stores the options available for listing networks +type NetworkListConfig struct { + // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here + Detailed bool + Verbose bool +} diff --git a/vendor/github.com/docker/docker/api/types/backend/build.go b/vendor/github.com/docker/docker/api/types/backend/build.go new file mode 100644 index 000000000..91715d0b9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/backend/build.go @@ -0,0 +1,46 @@ +package backend // import "github.com/docker/docker/api/types/backend" + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/streamformatter" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// PullOption defines different modes for accessing images +type PullOption int + +const ( + // PullOptionNoPull only returns local images + PullOptionNoPull PullOption = iota + // PullOptionForcePull always tries to pull a ref from the registry first + PullOptionForcePull + // PullOptionPreferLocal uses local image if it exists, otherwise pulls + PullOptionPreferLocal +) + +// ProgressWriter is a data object to transport progress streams to the client +type ProgressWriter struct { + Output io.Writer + StdoutFormatter io.Writer + StderrFormatter io.Writer + AuxFormatter *streamformatter.AuxFormatter + ProgressReaderFunc func(io.ReadCloser) io.ReadCloser +} + +// BuildConfig is the configuration used by a BuildManager to start a build +type BuildConfig struct { + Source io.ReadCloser + ProgressWriter ProgressWriter + Options *types.ImageBuildOptions +} + +// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer +type GetImageAndLayerOptions struct { + PullOption PullOption + AuthConfig map[string]registry.AuthConfig + Output io.Writer + Platform *ocispec.Platform +} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/list.go b/vendor/github.com/docker/docker/api/types/checkpoint/list.go new file mode 100644 index 000000000..94a9c0a47 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/checkpoint/list.go @@ -0,0 +1,7 @@ +package checkpoint + +// Summary represents the details of a checkpoint when listing endpoints. +type Summary struct { + // Name is the name of the checkpoint. + Name string +} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/options.go b/vendor/github.com/docker/docker/api/types/checkpoint/options.go new file mode 100644 index 000000000..9477458c2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/checkpoint/options.go @@ -0,0 +1,19 @@ +package checkpoint + +// CreateOptions holds parameters to create a checkpoint from a container. +type CreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// ListOptions holds parameters to list checkpoints for a container. +type ListOptions struct { + CheckpointDir string +} + +// DeleteOptions holds parameters to delete a checkpoint from a container. 
+type DeleteOptions struct { + CheckpointID string + CheckpointDir string +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index d8cd30613..24b00a275 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -11,44 +11,6 @@ import ( units "github.com/docker/go-units" ) -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// CheckpointListOptions holds parameters to list checkpoints for a container -type CheckpointListOptions struct { - CheckpointDir string -} - -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container -type CheckpointDeleteOptions struct { - CheckpointID string - CheckpointDir string -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - // ContainerExecInspect holds information returned by exec inspect. type ContainerExecInspect struct { ExecID string `json:"ID"` @@ -58,42 +20,6 @@ type ContainerExecInspect struct { Pid int } -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. -type ContainerStartOptions struct { - CheckpointID string - CheckpointDir string -} - // CopyToContainerOptions holds information // about files to copy into a container type CopyToContainerOptions struct { @@ -307,14 +233,6 @@ type ImageSearchOptions struct { Limit int } -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height uint - Width uint -} - // NodeListOptions holds parameters to list nodes with. type NodeListOptions struct { Filters filters.Args @@ -340,15 +258,6 @@ type ServiceCreateOptions struct { QueryRegistry bool } -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string - // Warnings is a set of non-fatal warning messages to pass on to the user. 
- Warnings []string `json:",omitempty"` -} - // Values for RegistryAuthFrom in ServiceUpdateOptions const ( RegistryAuthFromSpec = "spec" diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 7d5930bbe..945b6efad 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -1,32 +1,5 @@ package types // import "github.com/docker/docker/api/types" -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - Platform *ocispec.Platform - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - // ExecConfig is a small subset of the Config struct that holds the configuration // for the exec feature of docker. type ExecConfig struct { @@ -43,25 +16,3 @@ type ExecConfig struct { WorkingDir string // Working directory Cmd []string // Execution commands and args } - -// PluginRmConfig holds arguments for plugin remove. -type PluginRmConfig struct { - ForceRemove bool -} - -// PluginEnableConfig holds arguments for plugin enable -type PluginEnableConfig struct { - Timeout int -} - -// PluginDisableConfig holds arguments for plugin disable. -type PluginDisableConfig struct { - ForceDisable bool -} - -// NetworkListConfig stores the options available for listing networks -type NetworkListConfig struct { - // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here - Detailed bool - Verbose bool -} diff --git a/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go b/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go deleted file mode 100644 index 6b4b47390..000000000 --- a/vendor/github.com/docker/docker/api/types/container/change_response_deprecated.go +++ /dev/null @@ -1,6 +0,0 @@ -package container - -// ContainerChangeResponseItem change item in response to ContainerChanges operation -// -// Deprecated: use [FilesystemChange]. -type ContainerChangeResponseItem = FilesystemChange diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index 077583e66..be41d6315 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -5,6 +5,7 @@ import ( "time" "github.com/docker/docker/api/types/strslice" + dockerspec "github.com/docker/docker/image/spec/specs-go/v1" "github.com/docker/go-connections/nat" ) @@ -33,25 +34,7 @@ type StopOptions struct { } // HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. 
- // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} +type HealthConfig = dockerspec.HealthcheckConfig // ExecStartOptions holds the options to start container's exec. type ExecStartOptions struct { @@ -87,10 +70,13 @@ type Config struct { WorkingDir string // Current directory (PWD) in the command will be launched Entrypoint strslice.StrSlice // Entrypoint to run when starting the container NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT + // Mac Address of the container. + // + // Deprecated: this field is deprecated since API v1.44. Use EndpointSettings.MacAddress instead. 
+ MacAddress string `json:",omitempty"`
+ OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
+ Labels map[string]string // List of labels set to this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
 }
diff --git a/vendor/github.com/docker/docker/api/types/container/errors.go b/vendor/github.com/docker/docker/api/types/container/errors.go
new file mode 100644
index 000000000..32c978037
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/errors.go
@@ -0,0 +1,9 @@
+package container
+
+type errInvalidParameter struct{ error }
+
+func (e *errInvalidParameter) InvalidParameter() {}
+
+func (e *errInvalidParameter) Unwrap() error {
+ return e.error
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
index d4e6f5537..efb96266e 100644
--- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go
@@ -1,10 +1,12 @@
 package container // import "github.com/docker/docker/api/types/container"
 import (
+ "fmt"
 "strings"
 "github.com/docker/docker/api/types/blkiodev"
 "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/network"
 "github.com/docker/docker/api/types/strslice"
 "github.com/docker/go-connections/nat"
 units "github.com/docker/go-units"
@@ -132,12 +134,12 @@ type NetworkMode string
 // IsNone indicates whether container isn't using a network stack.
 func (n NetworkMode) IsNone() bool {
- return n == "none"
+ return n == network.NetworkNone
 }
 // IsDefault indicates whether container uses the default network stack.
 func (n NetworkMode) IsDefault() bool {
- return n == "default"
+ return n == network.NetworkDefault
 }
 // IsPrivate indicates whether container uses its private network stack.
@@ -271,33 +273,42 @@ type DeviceMapping struct {
 // RestartPolicy represents the restart policies of the container.
 type RestartPolicy struct {
- Name string
+ Name RestartPolicyMode
 MaximumRetryCount int
 }
+type RestartPolicyMode string
+
+const (
+ RestartPolicyDisabled RestartPolicyMode = "no"
+ RestartPolicyAlways RestartPolicyMode = "always"
+ RestartPolicyOnFailure RestartPolicyMode = "on-failure"
+ RestartPolicyUnlessStopped RestartPolicyMode = "unless-stopped"
+)
+
 // IsNone indicates whether the container has the "no" restart policy.
 // This means the container will not automatically restart when exiting.
 func (rp *RestartPolicy) IsNone() bool {
- return rp.Name == "no" || rp.Name == ""
+ return rp.Name == RestartPolicyDisabled || rp.Name == ""
 }
 // IsAlways indicates whether the container has the "always" restart policy.
 // This means the container will automatically restart regardless of the exit status.
 func (rp *RestartPolicy) IsAlways() bool {
- return rp.Name == "always"
+ return rp.Name == RestartPolicyAlways
 }
 // IsOnFailure indicates whether the container has the "on-failure" restart policy.
 // This means the container will automatically restart if exiting with a non-zero exit status.
 func (rp *RestartPolicy) IsOnFailure() bool {
- return rp.Name == "on-failure"
+ return rp.Name == RestartPolicyOnFailure
 }
 // IsUnlessStopped indicates whether the container has the
 // "unless-stopped" restart policy.
This means the container will // automatically restart unless user has put it to stopped state. func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" + return rp.Name == RestartPolicyUnlessStopped } // IsSame compares two RestartPolicy to see if they are the same @@ -305,6 +316,33 @@ func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount } +// ValidateRestartPolicy validates the given RestartPolicy. +func ValidateRestartPolicy(policy RestartPolicy) error { + switch policy.Name { + case RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled: + if policy.MaximumRetryCount != 0 { + msg := "invalid restart policy: maximum retry count can only be used with 'on-failure'" + if policy.MaximumRetryCount < 0 { + msg += " and cannot be negative" + } + return &errInvalidParameter{fmt.Errorf(msg)} + } + return nil + case RestartPolicyOnFailure: + if policy.MaximumRetryCount < 0 { + return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")} + } + return nil + case "": + // Versions before v25.0.0 created an empty restart-policy "name" as + // default. Allow an empty name with "any" MaximumRetryCount for + // backward-compatibility. + return nil + default: + return &errInvalidParameter{fmt.Errorf("invalid restart policy: unknown policy '%s'; use one of '%s', '%s', '%s', or '%s'", policy.Name, RestartPolicyDisabled, RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyUnlessStopped)} + } +} + // LogMode is a type to define the available modes for logging // These modes affect how logs are handled when log messages start piling up. type LogMode string diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index 24c4fa8d9..421329237 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -1,8 +1,9 @@ //go:build !windows -// +build !windows package container // import "github.com/docker/docker/api/types/container" +import "github.com/docker/docker/api/types/network" + // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { return i.IsDefault() @@ -11,15 +12,15 @@ func (i Isolation) IsValid() bool { // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsBridge() { - return "bridge" + return network.NetworkBridge } else if n.IsHost() { - return "host" + return network.NetworkHost } else if n.IsContainer() { return "container" } else if n.IsNone() { - return "none" + return network.NetworkNone } else if n.IsDefault() { - return "default" + return network.NetworkDefault } else if n.IsUserDefined() { return n.UserDefined() } @@ -28,12 +29,12 @@ func (n NetworkMode) NetworkName() string { // IsBridge indicates whether container uses the bridge network stack func (n NetworkMode) IsBridge() bool { - return n == "bridge" + return n == network.NetworkBridge } // IsHost indicates whether container uses the host network stack. 
func (n NetworkMode) IsHost() bool { - return n == "host" + return n == network.NetworkHost } // IsUserDefined indicates user-created network diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go index 99f803a5b..154667f4f 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -1,9 +1,11 @@ package container // import "github.com/docker/docker/api/types/container" +import "github.com/docker/docker/api/types/network" + // IsBridge indicates whether container uses the bridge network stack // in windows it is given the name NAT func (n NetworkMode) IsBridge() bool { - return n == "nat" + return n == network.NetworkNat } // IsHost indicates whether container uses the host network stack. @@ -25,11 +27,11 @@ func (i Isolation) IsValid() bool { // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsDefault() { - return "default" + return network.NetworkDefault } else if n.IsBridge() { - return "nat" + return network.NetworkNat } else if n.IsNone() { - return "none" + return network.NetworkNone } else if n.IsContainer() { return "container" } else if n.IsUserDefined() { diff --git a/vendor/github.com/docker/docker/api/types/container/options.go b/vendor/github.com/docker/docker/api/types/container/options.go new file mode 100644 index 000000000..7a2300576 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/options.go @@ -0,0 +1,67 @@ +package container + +import "github.com/docker/docker/api/types/filters" + +// ResizeOptions holds parameters to resize a TTY. +// It can be used to resize container TTYs and +// exec process TTYs too. +type ResizeOptions struct { + Height uint + Width uint +} + +// AttachOptions holds parameters to attach to a container. +type AttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// CommitOptions holds parameters to commit changes into a container. +type CommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *Config +} + +// RemoveOptions holds parameters to remove containers. +type RemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// StartOptions holds parameters to start containers. +type StartOptions struct { + CheckpointID string + CheckpointDir string +} + +// ListOptions holds parameters to list containers with. +type ListOptions struct { + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// LogsOptions holds parameters to filter logs with. +type LogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index 9fe07e26f..6dbcd9223 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,7 +1,7 @@ package events // import "github.com/docker/docker/api/types/events" // Type is used for event-types. -type Type = string +type Type string // List of known event types. 
const (
@@ -18,6 +18,86 @@
 VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate.
 )
+// Action is used for event-actions.
+type Action string
+
+const (
+ ActionCreate Action = "create"
+ ActionStart Action = "start"
+ ActionRestart Action = "restart"
+ ActionStop Action = "stop"
+ ActionCheckpoint Action = "checkpoint"
+ ActionPause Action = "pause"
+ ActionUnPause Action = "unpause"
+ ActionAttach Action = "attach"
+ ActionDetach Action = "detach"
+ ActionResize Action = "resize"
+ ActionUpdate Action = "update"
+ ActionRename Action = "rename"
+ ActionKill Action = "kill"
+ ActionDie Action = "die"
+ ActionOOM Action = "oom"
+ ActionDestroy Action = "destroy"
+ ActionRemove Action = "remove"
+ ActionCommit Action = "commit"
+ ActionTop Action = "top"
+ ActionCopy Action = "copy"
+ ActionArchivePath Action = "archive-path"
+ ActionExtractToDir Action = "extract-to-dir"
+ ActionExport Action = "export"
+ ActionImport Action = "import"
+ ActionSave Action = "save"
+ ActionLoad Action = "load"
+ ActionTag Action = "tag"
+ ActionUnTag Action = "untag"
+ ActionPush Action = "push"
+ ActionPull Action = "pull"
+ ActionPrune Action = "prune"
+ ActionDelete Action = "delete"
+ ActionEnable Action = "enable"
+ ActionDisable Action = "disable"
+ ActionConnect Action = "connect"
+ ActionDisconnect Action = "disconnect"
+ ActionReload Action = "reload"
+ ActionMount Action = "mount"
+ ActionUnmount Action = "unmount"
+
+ // ActionExecCreate is the prefix used for exec_create events. These
+ // event-actions are commonly followed by a colon and space (": "),
+ // and the command that's defined for the exec, for example:
+ //
+ // exec_create: /bin/sh -c 'echo hello'
+ //
+ // This is far from ideal; it's a compromise to allow filtering and
+ // to preserve backward-compatibility.
+ ActionExecCreate Action = "exec_create"
+ // ActionExecStart is the prefix used for exec_start events. These
+ // event-actions are commonly followed by a colon and space (": "),
+ // and the command that's defined for the exec, for example:
+ //
+ // exec_start: /bin/sh -c 'echo hello'
+ //
+ // This is far from ideal; it's a compromise to allow filtering and
+ // to preserve backward-compatibility.
+ ActionExecStart Action = "exec_start"
+ ActionExecDie Action = "exec_die"
+ ActionExecDetach Action = "exec_detach"
+
+ // ActionHealthStatus is the prefix to use for health_status events.
+ //
+ // Health-status events can either have a pre-defined status, in which
+ // case the "health_status" action is followed by a colon, or can be
+ // "free-form", in which case they're followed by the health-check output.
+ //
+ // This is far from ideal, and a compromise to allow filtering, and
+ // to preserve backward-compatibility.
+ ActionHealthStatus Action = "health_status"
+ ActionHealthStatusRunning Action = "health_status: running"
+ ActionHealthStatusHealthy Action = "health_status: healthy"
+ ActionHealthStatusUnhealthy Action = "health_status: unhealthy"
+)
+
 // Actor describes something that generates events,
 // like a container, or a network, or a volume.
 // It has a defined name and a set of attributes.
@@ -37,7 +117,7 @@ type Message struct {
 From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead.
 Type Type
- Action string
+ Action Action
 Actor Actor
 // Engine events are local scope. Cluster events are swarm scope.
Scope string `json:"scope,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image/delete_response.go similarity index 68% rename from vendor/github.com/docker/docker/api/types/image_delete_response_item.go rename to vendor/github.com/docker/docker/api/types/image/delete_response.go index b9a65a0d8..998620dc6 100644 --- a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ b/vendor/github.com/docker/docker/api/types/image/delete_response.go @@ -1,11 +1,11 @@ -package types +package image // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { +// DeleteResponse delete response +// swagger:model DeleteResponse +type DeleteResponse struct { // The image ID of an image that was deleted Deleted string `json:"Deleted,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/image/image.go b/vendor/github.com/docker/docker/api/types/image/image.go new file mode 100644 index 000000000..167df28c7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image.go @@ -0,0 +1,9 @@ +package image + +import "time" + +// Metadata contains engine-local data about the image. +type Metadata struct { + // LastTagTime is the date and time at which the image was last tagged. + LastTagTime time.Time `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go similarity index 85% rename from vendor/github.com/docker/docker/api/types/image_summary.go rename to vendor/github.com/docker/docker/api/types/image/summary.go index 0f6f14484..f1e3e2ef0 100644 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ b/vendor/github.com/docker/docker/api/types/image/summary.go @@ -1,11 +1,11 @@ -package types +package image // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { +// Summary summary +// swagger:model Summary +type Summary struct { // Number of containers using this image. Includes both stopped and running // containers. @@ -84,11 +84,6 @@ type ImageSummary struct { // Total size of the image including all layers it is composed of. // - // In versions of Docker before v1.10, this field was calculated from - // the image itself and all of its parent images. Images are now stored - // self-contained, and no longer use a parent-chain, making this field - // an equivalent of the Size field. - // - // Deprecated: this field is kept for backward compatibility, and will be removed in API v1.44. + // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. 
VirtualSize int64 `json:"VirtualSize,omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index ac4ce6223..57edf2ef1 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -29,7 +29,7 @@ type Mount struct { // Source is not supported for tmpfs (must be an empty value) Source string `json:",omitempty"` Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` // attempts recursive read-only if possible Consistency Consistency `json:",omitempty"` BindOptions *BindOptions `json:",omitempty"` @@ -85,6 +85,11 @@ type BindOptions struct { Propagation Propagation `json:",omitempty"` NonRecursive bool `json:",omitempty"` CreateMountpoint bool `json:",omitempty"` + // ReadOnlyNonRecursive makes the mount non-recursively read-only, but still leaves the mount recursive + // (unless NonRecursive is set to true in conjunction). + ReadOnlyNonRecursive bool `json:",omitempty"` + // ReadOnlyForceRecursive raises an error if the mount cannot be made recursively read-only. + ReadOnlyForceRecursive bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go new file mode 100644 index 000000000..9edd1c38d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/endpoint.go @@ -0,0 +1,147 @@ +package network + +import ( + "errors" + "fmt" + "net" + + "github.com/docker/docker/internal/multierror" +) + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string // Aliases holds the list of extra, user-specified DNS names for this endpoint. + // MacAddress may be used to specify a MAC address when the container is created. + // Once the container is running, it becomes operational data (it may contain a + // generated address). + MacAddress string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + DriverOpts map[string]string + // DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to + // generate PTR records. + DNSNames []string +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) 
+ } + + if len(es.DNSNames) > 0 { + epCopy.DNSNames = make([]string, len(es.DNSNames)) + copy(epCopy.DNSNames, es.DNSNames) + } + + return &epCopy +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// NetworkSubnet describes a user-defined subnet for a specific network. It's only used to validate if an +// EndpointIPAMConfig is valid for a specific network. +type NetworkSubnet interface { + // Contains checks whether the NetworkSubnet contains [addr]. + Contains(addr net.IP) bool + // IsStatic checks whether the subnet was statically allocated (ie. user-defined). + IsStatic() bool +} + +// IsInRange checks whether static IP addresses are valid in a specific network. +func (cfg *EndpointIPAMConfig) IsInRange(v4Subnets []NetworkSubnet, v6Subnets []NetworkSubnet) error { + var errs []error + + if err := validateEndpointIPAddress(cfg.IPv4Address, v4Subnets); err != nil { + errs = append(errs, err) + } + if err := validateEndpointIPAddress(cfg.IPv6Address, v6Subnets); err != nil { + errs = append(errs, err) + } + + return multierror.Join(errs...) +} + +func validateEndpointIPAddress(epAddr string, ipamSubnets []NetworkSubnet) error { + if epAddr == "" { + return nil + } + + var staticSubnet bool + parsedAddr := net.ParseIP(epAddr) + for _, subnet := range ipamSubnets { + if subnet.IsStatic() { + staticSubnet = true + if subnet.Contains(parsedAddr) { + return nil + } + } + } + + if staticSubnet { + return fmt.Errorf("no configured subnet or ip-range contain the IP address %s", epAddr) + } + + return errors.New("user specified IP address is supported only when connecting to networks with user configured subnets") +} + +// Validate checks whether cfg is valid. +func (cfg *EndpointIPAMConfig) Validate() error { + if cfg == nil { + return nil + } + + var errs []error + + if cfg.IPv4Address != "" { + if addr := net.ParseIP(cfg.IPv4Address); addr == nil || addr.To4() == nil || addr.IsUnspecified() { + errs = append(errs, fmt.Errorf("invalid IPv4 address: %s", cfg.IPv4Address)) + } + } + if cfg.IPv6Address != "" { + if addr := net.ParseIP(cfg.IPv6Address); addr == nil || addr.To4() != nil || addr.IsUnspecified() { + errs = append(errs, fmt.Errorf("invalid IPv6 address: %s", cfg.IPv6Address)) + } + } + for _, addr := range cfg.LinkLocalIPs { + if parsed := net.ParseIP(addr); parsed == nil || parsed.IsUnspecified() { + errs = append(errs, fmt.Errorf("invalid link-local IP address: %s", addr)) + } + } + + return multierror.Join(errs...) 
+} diff --git a/vendor/github.com/docker/docker/api/types/network/ipam.go b/vendor/github.com/docker/docker/api/types/network/ipam.go new file mode 100644 index 000000000..f319e1402 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/ipam.go @@ -0,0 +1,134 @@ +package network + +import ( + "errors" + "fmt" + "net/netip" + + "github.com/docker/docker/internal/multierror" +) + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +type ipFamily string + +const ( + ip4 ipFamily = "IPv4" + ip6 ipFamily = "IPv6" +) + +// ValidateIPAM checks whether the network's IPAM passed as argument is valid. It returns a joinError of the list of +// errors found. +func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error { + if ipam == nil { + return nil + } + + var errs []error + for _, cfg := range ipam.Config { + subnet, err := netip.ParsePrefix(cfg.Subnet) + if err != nil { + errs = append(errs, fmt.Errorf("invalid subnet %s: invalid CIDR block notation", cfg.Subnet)) + continue + } + subnetFamily := ip4 + if subnet.Addr().Is6() { + subnetFamily = ip6 + } + + if !enableIPv6 && subnetFamily == ip6 { + continue + } + + if subnet != subnet.Masked() { + errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked())) + } + + if ipRangeErrs := validateIPRange(cfg.IPRange, subnet, subnetFamily); len(ipRangeErrs) > 0 { + errs = append(errs, ipRangeErrs...) + } + + if err := validateAddress(cfg.Gateway, subnet, subnetFamily); err != nil { + errs = append(errs, fmt.Errorf("invalid gateway %s: %w", cfg.Gateway, err)) + } + + for auxName, aux := range cfg.AuxAddress { + if err := validateAddress(aux, subnet, subnetFamily); err != nil { + errs = append(errs, fmt.Errorf("invalid auxiliary address %s: %w", auxName, err)) + } + } + } + + if err := multierror.Join(errs...); err != nil { + return fmt.Errorf("invalid network config:\n%w", err) + } + + return nil +} + +func validateIPRange(ipRange string, subnet netip.Prefix, subnetFamily ipFamily) []error { + if ipRange == "" { + return nil + } + prefix, err := netip.ParsePrefix(ipRange) + if err != nil { + return []error{fmt.Errorf("invalid ip-range %s: invalid CIDR block notation", ipRange)} + } + family := ip4 + if prefix.Addr().Is6() { + family = ip6 + } + + if family != subnetFamily { + return []error{fmt.Errorf("invalid ip-range %s: parent subnet is an %s block", ipRange, subnetFamily)} + } + + var errs []error + if prefix.Bits() < subnet.Bits() { + errs = append(errs, fmt.Errorf("invalid ip-range %s: CIDR block is bigger than its parent subnet %s", ipRange, subnet)) + } + if prefix != prefix.Masked() { + errs = append(errs, fmt.Errorf("invalid ip-range %s: it should be %s", prefix, prefix.Masked())) + } + if !subnet.Overlaps(prefix) { + errs = append(errs, fmt.Errorf("invalid ip-range %s: parent subnet %s doesn't contain ip-range", ipRange, subnet)) + } + + return errs +} + +func validateAddress(address string, subnet netip.Prefix, subnetFamily ipFamily) error { + if address == "" { + return nil + } + addr, err := netip.ParseAddr(address) + if err != nil { + return errors.New("invalid address") + } + family := ip4 + if addr.Is6() { + family = ip6 + } + + if 
family != subnetFamily { + return fmt.Errorf("parent subnet is an %s block", subnetFamily) + } + if !subnet.Contains(addr) { + return fmt.Errorf("parent subnet %s doesn't contain this address", subnet) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index 437b184c6..f1f300f3d 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -1,69 +1,34 @@ package network // import "github.com/docker/docker/api/types/network" + import ( "github.com/docker/docker/api/types/filters" ) +const ( + // NetworkDefault is a platform-independent alias to choose the platform-specific default network stack. + NetworkDefault = "default" + // NetworkHost is the name of the predefined network used when the NetworkMode host is selected (only available on Linux) + NetworkHost = "host" + // NetworkNone is the name of the predefined network used when the NetworkMode none is selected (available on both Linux and Windows) + NetworkNone = "none" + // NetworkBridge is the name of the default network on Linux + NetworkBridge = "bridge" + // NetworkNat is the name of the default network on Windows + NetworkNat = "nat" +) + // Address represents an IP address type Address struct { Addr string PrefixLen int } -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string // Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) - return &cfgCopy -} - // PeerInfo represents one peer of an overlay network type PeerInfo struct { Name string IP string } -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string - DriverOpts map[string]string -} - // Task carries the information about one backend task type Task struct { Name string @@ -80,25 +45,6 @@ type ServiceInfo struct { Tasks []Task } -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) 
- } - return &epCopy -} - // NetworkingConfig represents the container's networking configuration for each of its interfaces // Carries the networking configs specified in the `docker run` and `docker network connect` commands type NetworkingConfig struct { diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index b83f5d7b2..05cb31075 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -92,7 +92,9 @@ type SearchResult struct { IsOfficial bool `json:"is_official"` // Name is the name of the repository Name string `json:"name"` - // IsAutomated indicates whether the result is automated + // IsAutomated indicates whether the result is automated. + // + // Deprecated: the "is_automated" field is deprecated and will always be "false" in the future. IsAutomated bool `json:"is_automated"` // Description is a textual description of the repository Description string `json:"description"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index af5e1c0bc..65f61d2d2 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -32,6 +32,42 @@ type SELinuxContext struct { Level string } +// SeccompMode is the type used for the enumeration of possible seccomp modes +// in SeccompOpts +type SeccompMode string + +const ( + SeccompModeDefault SeccompMode = "default" + SeccompModeUnconfined SeccompMode = "unconfined" + SeccompModeCustom SeccompMode = "custom" +) + +// SeccompOpts defines the options for configuring seccomp on a swarm-managed +// container. +type SeccompOpts struct { + // Mode is the SeccompMode used for the container. + Mode SeccompMode `json:",omitempty"` + // Profile is the custom seccomp profile as a json object to be used with + // the container. Mode should be set to SeccompModeCustom when using a + // custom profile in this manner. + Profile []byte `json:",omitempty"` +} + +// AppArmorMode is type used for the enumeration of possible AppArmor modes in +// AppArmorOpts +type AppArmorMode string + +const ( + AppArmorModeDefault AppArmorMode = "default" + AppArmorModeDisabled AppArmorMode = "disabled" +) + +// AppArmorOpts defines the options for configuring AppArmor on a swarm-managed +// container. Currently, custom AppArmor profiles are not supported. +type AppArmorOpts struct { + Mode AppArmorMode `json:",omitempty"` +} + // CredentialSpec for managed service account (Windows only) type CredentialSpec struct { Config string @@ -41,8 +77,11 @@ type CredentialSpec struct { // Privileges defines the security options for the container. type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext + Seccomp *SeccompOpts `json:",omitempty"` + AppArmor *AppArmorOpts `json:",omitempty"` + NoNewPrivileges bool } // ContainerSpec represents the spec of a container. diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go index 98c2806c3..292bd7afc 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -1,3 +1,3 @@ -//go:generate protoc -I . 
--gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto +//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go index e45045866..32aaf0d51 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -1,23 +1,15 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: plugin.proto -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ package runtime -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -28,22 +20,50 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // PluginSpec defines the base payload which clients can specify for creating // a service with the plugin runtime. type PluginSpec struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"` Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_22a625af4bc1cc87, []int{0} +} +func (m *PluginSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PluginSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PluginSpec.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PluginSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginSpec.Merge(m, src) +} +func (m *PluginSpec) XXX_Size() int { + return m.Size() +} +func (m *PluginSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PluginSpec.DiscardUnknown(m) } -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } +var xxx_messageInfo_PluginSpec 
proto.InternalMessageInfo func (m *PluginSpec) GetName() string { if m != nil { @@ -85,13 +105,41 @@ func (m *PluginSpec) GetEnv() []string { type PluginPrivilege struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value,proto3" json:"value,omitempty"` } -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { + return fileDescriptor_22a625af4bc1cc87, []int{1} +} +func (m *PluginPrivilege) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PluginPrivilege) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PluginPrivilege.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PluginPrivilege) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginPrivilege.Merge(m, src) +} +func (m *PluginPrivilege) XXX_Size() int { + return m.Size() +} +func (m *PluginPrivilege) XXX_DiscardUnknown() { + xxx_messageInfo_PluginPrivilege.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginPrivilege proto.InternalMessageInfo func (m *PluginPrivilege) GetName() string { if m != nil { @@ -118,10 +166,32 @@ func init() { proto.RegisterType((*PluginSpec)(nil), "PluginSpec") proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") } + +func init() { proto.RegisterFile("plugin.proto", fileDescriptor_22a625af4bc1cc87) } + +var fileDescriptor_22a625af4bc1cc87 = []byte{ + // 225 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x9a, 0xc1, 0xc8, 0xc5, 0x15, 0x00, 0x16, + 0x08, 0x2e, 0x48, 0x4d, 0x16, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, + 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0x52, 0x73, 0xf3, 0x4b, 0x52, 0x25, + 0x98, 0xc0, 0xa2, 0x50, 0x9e, 0x90, 0x01, 0x17, 0x57, 0x41, 0x51, 0x66, 0x59, 0x66, 0x4e, 0x6a, + 0x7a, 0x6a, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x80, 0x1e, 0xc4, 0xb0, 0x00, 0x98, + 0x44, 0x10, 0x92, 0x1a, 0x21, 0x29, 0x2e, 0x8e, 0x94, 0xcc, 0xe2, 0xc4, 0xa4, 0x9c, 0xd4, 0x14, + 0x09, 0x16, 0x05, 0x46, 0x0d, 0x8e, 0x20, 0x38, 0x5f, 0x48, 0x80, 0x8b, 0x39, 0x35, 0xaf, 0x4c, + 0x82, 0x55, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc4, 0x54, 0x8a, 0xe5, 0xe2, 0x47, 0x33, 0x0c, 0xab, + 0xf3, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x6e, 0x44, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x91, 0x33, + 0x08, 0xc2, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, + 0x70, 0xd0, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 
0x37, 0xea, 0xe2, 0xca, 0x2a, 0x01, 0x00, + 0x00, +} + func (m *PluginSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -129,66 +199,69 @@ func (m *PluginSpec) Marshal() (dAtA []byte, err error) { } func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PluginSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Env[iNdEx]) + copy(dAtA[i:], m.Env[iNdEx]) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Env[iNdEx]))) + i-- + dAtA[i] = 0x2a } } if m.Disabled { - dAtA[i] = 0x20 - i++ + i-- if m.Disabled { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x20 } - if len(m.Env) > 0 { - for _, s := range m.Env { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Privileges) > 0 { + for iNdEx := len(m.Privileges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Privileges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlugin(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - return i, nil + if len(m.Remote) > 0 { + i -= len(m.Remote) + copy(dAtA[i:], m.Remote) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -196,50 +269,56 @@ func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { } func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PluginPrivilege) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if len(m.Value) > 0 { + for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Value[iNdEx]) + copy(dAtA[i:], m.Value[iNdEx]) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) + i-- + dAtA[i] = 0x12 } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - 
for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + offset -= sovPlugin(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *PluginSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -269,6 +348,9 @@ func (m *PluginSpec) Size() (n int) { } func (m *PluginPrivilege) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -289,14 +371,7 @@ func (m *PluginPrivilege) Size() (n int) { } func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozPlugin(x uint64) (n int) { return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -316,7 +391,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -344,7 +419,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -354,6 +429,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -373,7 +451,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -383,6 +461,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -402,7 +483,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -411,6 +492,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -433,7 +517,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -453,7 +537,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -463,6 +547,9 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -474,7 +561,7 @@ func (m *PluginSpec) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPlugin } if (iNdEx + skippy) > l { @@ -504,7 +591,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -532,7 +619,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -542,6 +629,9 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -561,7 +651,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -571,6 +661,9 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -590,7 +683,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -600,6 +693,9 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { return ErrInvalidLengthPlugin } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlugin + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -611,7 +707,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPlugin } if (iNdEx + skippy) > l { @@ -629,6 +725,7 @@ func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { func skipPlugin(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -660,10 +757,8 @@ func skipPlugin(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -680,75 +775,34 @@ func skipPlugin(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthPlugin } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPlugin + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthPlugin + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer 
overflow") + ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") ) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, - 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, - 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, - 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, - 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, - 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, - 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, - 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, - 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, - 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, - 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, - 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, - 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, - 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, - 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, - 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto index 9ef169046..e311b36ba 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -1,7 +1,5 @@ syntax = "proto3"; -option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; - // PluginSpec defines the base payload which clients can specify for creating // a service with the plugin runtime. message PluginSpec { diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go index 6eb452d24..5b6d5ec12 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -34,9 +34,9 @@ type ServiceSpec struct { UpdateConfig *UpdateConfig `json:",omitempty"` RollbackConfig *UpdateConfig `json:",omitempty"` - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. + // Networks specifies which networks the service should attach to. + // + // Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. 
Networks []NetworkAttachmentConfig `json:",omitempty"` EndpointSpec *EndpointSpec `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go b/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go new file mode 100644 index 000000000..9a268ff1b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go @@ -0,0 +1,20 @@ +package swarm + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceCreateResponse contains the information returned to a client on the +// creation of a new service. +// +// swagger:model ServiceCreateResponse +type ServiceCreateResponse struct { + + // The ID of the created service. + ID string `json:"ID,omitempty"` + + // Optional warning message. + // + // FIXME(thaJeztah): this should have "omitempty" in the generated type. + // + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go similarity index 95% rename from vendor/github.com/docker/docker/api/types/service_update_response.go rename to vendor/github.com/docker/docker/api/types/swarm/service_update_response.go index 74ea64b1b..0417467da 100644 --- a/vendor/github.com/docker/docker/api/types/service_update_response.go +++ b/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go @@ -1,4 +1,4 @@ -package types +package swarm // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go new file mode 100644 index 000000000..89d4a0098 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -0,0 +1,116 @@ +package system + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" +) + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. 
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + CgroupVersion string `json:",omitempty"` + NEventsListener int + KernelVersion string + OperatingSystem string + OSVersion string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + Runtimes map[string]RuntimeWithStatus + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + CDISpecDirs []string + + // Legacy API fields for older API versions. + legacyFields + + // Warnings contains a slice of warnings that occurred while collecting + // system information. These warnings are intended to be informational + // messages for the user, and are not intended to be parsed / used for + // other purposes, as they do not have a fixed format. + Warnings []string +} + +type legacyFields struct { + ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions. +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by [Info] struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. +} + +// NetworkAddressPool is a temp struct used by [Info] struct. +type NetworkAddressPool struct { + Base string + Size int +} diff --git a/vendor/github.com/docker/docker/api/types/system/runtime.go b/vendor/github.com/docker/docker/api/types/system/runtime.go new file mode 100644 index 000000000..d077295a0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/system/runtime.go @@ -0,0 +1,20 @@ +package system + +// Runtime describes an OCI runtime +type Runtime struct { + // "Legacy" runtime configuration for runc-compatible runtimes. + + Path string `json:"path,omitempty"` + Args []string `json:"runtimeArgs,omitempty"` + + // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. 
+ + Type string `json:"runtimeType,omitempty"` + Options map[string]interface{} `json:"options,omitempty"` +} + +// RuntimeWithStatus extends [Runtime] to hold [RuntimeStatus]. +type RuntimeWithStatus struct { + Runtime + Status map[string]string `json:"status,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/system/security_opts.go b/vendor/github.com/docker/docker/api/types/system/security_opts.go new file mode 100644 index 000000000..edff3eb1a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/system/security_opts.go @@ -0,0 +1,48 @@ +package system + +import ( + "errors" + "fmt" + "strings" +) + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a +// type-safe [SecurityOpt]. +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + for _, s := range strings.Split(opt, ",") { + k, v, ok := strings.Cut(s, "=") + if !ok { + return nil, fmt.Errorf("invalid security option %q", s) + } + if k == "" || v == "" { + return nil, errors.New("invalid empty security option") + } + if k == "name" { + secopt.Name = v + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) + } + so = append(so, secopt) + } + return so, nil +} + +// KeyValue holds a key/value pair. +type KeyValue struct { + Key, Value string +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index b413e0200..5c56a0caf 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -1,18 +1,15 @@ package types // import "github.com/docker/docker/api/types" import ( - "errors" - "fmt" "io" "os" - "strings" "time" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/volume" "github.com/docker/go-connections/nat" @@ -80,6 +77,8 @@ type ImageInspect struct { // Container is the ID of the container that was used to create the image. // // Depending on how the image was created, this field may be empty. + // + // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. Container string // ContainerConfig is an optional field containing the configuration of the @@ -87,6 +86,8 @@ type ImageInspect struct { // // Previous versions of Docker builder used this field to store build cache, // and it is not in active use anymore. + // + // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. ContainerConfig *container.Config // DockerVersion is the version of Docker that was used to build the image. @@ -118,12 +119,7 @@ type ImageInspect struct { // VirtualSize is the total size of the image including all layers it is // composed of. // - // In versions of Docker before v1.10, this field was calculated from - // the image itself and all of its parent images. 
Docker v1.10 and up - // store images self-contained, and no longer use a parent-chain, making - // this field an equivalent of the Size field. - // - // Deprecated: Unused in API 1.43 and up, but kept for backward compatibility with older API versions. + // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. VirtualSize int64 `json:"VirtualSize,omitempty"` // GraphDriver holds information about the storage driver used to store the @@ -137,13 +133,7 @@ type ImageInspect struct { // Metadata of the image in the local cache. // // This information is local to the daemon, and not part of the image itself. - Metadata ImageMetadata -} - -// ImageMetadata contains engine-local data about the image -type ImageMetadata struct { - // LastTagTime is the date and time at which the image was last tagged. - LastTagTime time.Time `json:",omitempty"` + Metadata image.Metadata } // Container contains response of Engine API: @@ -237,148 +227,6 @@ type Version struct { BuildTime string `json:",omitempty"` } -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. -} - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - PidsLimit bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - CgroupVersion string `json:",omitempty"` - NEventsListener int - KernelVersion string - OperatingSystem string - OSVersion string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - GenericResources []swarm.GenericResource - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string - ProductLicense string `json:",omitempty"` - DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - - // Warnings contains a slice of warnings that occurred while collecting - // system information. 
These warnings are intended to be informational - // messages for the user, and are not intended to be parsed / used for - // other purposes, as they do not have a fixed format. - Warnings []string -} - -// KeyValue holds a key/value pair -type KeyValue struct { - Key, Value string -} - -// NetworkAddressPool is a temp struct used by Info struct -type NetworkAddressPool struct { - Base string - Size int -} - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// SecurityOpt -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - for _, s := range strings.Split(opt, ",") { - k, v, ok := strings.Cut(s, "=") - if !ok { - return nil, fmt.Errorf("invalid security option %q", s) - } - if k == "" || v == "" { - return nil, errors.New("invalid empty security option") - } - if k == "name" { - secopt.Name = v - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) - } - so = append(so, secopt) - } - return so, nil -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string - // List of Log plugins registered - Log []string -} - // ExecStartCheck is a temp struct used by execStart // Config fields is part of ExecConfig in runconfig package type ExecStartCheck struct { @@ -491,17 +339,27 @@ type SummaryNetworkSettings struct { Networks map[string]*network.EndpointSettings } -// NetworkSettingsBase holds basic information about networks +// NetworkSettingsBase holds networking state for a container when inspecting it. type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address + Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. + SandboxID string // SandboxID uniquely represents a container's network stack + SandboxKey string // SandboxKey identifies the sandbox + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + + // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + // + // Deprecated: This field is never set and will be removed in a future release. + HairpinMode bool + // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + // + // Deprecated: This field is never set and will be removed in a future release. 
+ LinkLocalIPv6Address string + // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + // + // Deprecated: This field is never set and will be removed in a future release. + LinkLocalIPv6PrefixLen int + SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release. + SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. } // DefaultNetworkSettings holds network information @@ -594,14 +452,9 @@ type EndpointResource struct { // NetworkCreate is the expected body of the "create network" http request message type NetworkCreate struct { - // Check for networks with duplicate names. - // Network is primarily keyed based on a random ID and not on the name. - // Network name is strictly a user-friendly alias to the network - // which is uniquely identified using ID. - // And there is no guaranteed way to check for duplicates. - // Option CheckDuplicate is there to provide a best effort checking of any networks - // which has the same name but it is not guaranteed to catch all name collisions. - CheckDuplicate bool + // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client + // package to older daemons. + CheckDuplicate bool `json:",omitempty"` Driver string Scope string EnableIPv6 bool @@ -645,33 +498,6 @@ type NetworkInspectOptions struct { Verbose bool } -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - // "Legacy" runtime configuration for runc-compatible runtimes. - - Path string `json:"path,omitempty"` - Args []string `json:"runtimeArgs,omitempty"` - - // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. - - Type string `json:"runtimeType,omitempty"` - Options map[string]interface{} `json:"options,omitempty"` - - // This is exposed here only for internal use - ShimConfig *ShimConfig `json:"-"` -} - -// ShimConfig is used by runtime to configure containerd shims -type ShimConfig struct { - Binary string - Opts interface{} -} - // DiskUsageObject represents an object type used for disk usage query filtering. type DiskUsageObject string @@ -697,7 +523,7 @@ type DiskUsageOptions struct { // GET "/system/df" type DiskUsage struct { LayersSize int64 - Images []*ImageSummary + Images []*image.Summary Containers []*Container Volumes []*volume.Volume BuildCache []*BuildCache @@ -721,7 +547,7 @@ type VolumesPruneReport struct { // ImagesPruneReport contains the response for Engine API: // POST "/images/prune" type ImagesPruneReport struct { - ImagesDeleted []ImageDeleteResponseItem + ImagesDeleted []image.DeleteResponse SpaceReclaimed uint64 } diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go new file mode 100644 index 000000000..e332a7bb6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -0,0 +1,138 @@ +package types + +import ( + "github.com/docker/docker/api/types/checkpoint" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/system" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container. +// +// Deprecated: use [checkpoint.CreateOptions]. 
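
Since the declarations below are type aliases rather than new types, existing callers keep compiling unchanged while new code can import the narrower packages directly. A hedged sketch of the preferred spelling, assuming a reachable daemon and a placeholder container name ("my-container"):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/checkpoint"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// checkpoint.ListOptions is the type the deprecated
	// types.CheckpointListOptions now aliases, so both names compile.
	checkpoints, err := cli.CheckpointList(context.Background(), "my-container", checkpoint.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, cp := range checkpoints {
		fmt.Println(cp.Name)
	}
}
```
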
+type CheckpointCreateOptions = checkpoint.CreateOptions + +// CheckpointListOptions holds parameters to list checkpoints for a container +// +// Deprecated: use [checkpoint.ListOptions]. +type CheckpointListOptions = checkpoint.ListOptions + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +// +// Deprecated: use [checkpoint.DeleteOptions]. +type CheckpointDeleteOptions = checkpoint.DeleteOptions + +// Checkpoint represents the details of a checkpoint when listing endpoints. +// +// Deprecated: use [checkpoint.Summary]. +type Checkpoint = checkpoint.Summary + +// Info contains response of Engine API: +// GET "/info" +// +// Deprecated: use [system.Info]. +type Info = system.Info + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +// +// Deprecated: use [system.Commit]. +type Commit = system.Commit + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by [system.Info] struct +// +// Deprecated: use [system.PluginsInfo]. +type PluginsInfo = system.PluginsInfo + +// NetworkAddressPool is a temp struct used by [system.Info] struct. +// +// Deprecated: use [system.NetworkAddressPool]. +type NetworkAddressPool = system.NetworkAddressPool + +// Runtime describes an OCI runtime. +// +// Deprecated: use [system.Runtime]. +type Runtime = system.Runtime + +// SecurityOpt contains the name and options of a security option. +// +// Deprecated: use [system.SecurityOpt]. +type SecurityOpt = system.SecurityOpt + +// KeyValue holds a key/value pair. +// +// Deprecated: use [system.KeyValue]. +type KeyValue = system.KeyValue + +// ImageDeleteResponseItem image delete response item. +// +// Deprecated: use [image.DeleteResponse]. +type ImageDeleteResponseItem = image.DeleteResponse + +// ImageSummary image summary. +// +// Deprecated: use [image.Summary]. +type ImageSummary = image.Summary + +// ImageMetadata contains engine-local data about the image. +// +// Deprecated: use [image.Metadata]. +type ImageMetadata = image.Metadata + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +// +// Deprecated: use [swarm.ServiceCreateResponse]. +type ServiceCreateResponse = swarm.ServiceCreateResponse + +// ServiceUpdateResponse service update response. +// +// Deprecated: use [swarm.ServiceUpdateResponse]. +type ServiceUpdateResponse = swarm.ServiceUpdateResponse + +// ContainerStartOptions holds parameters to start containers. +// +// Deprecated: use [container.StartOptions]. +type ContainerStartOptions = container.StartOptions + +// ResizeOptions holds parameters to resize a TTY. +// It can be used to resize container TTYs and +// exec process TTYs too. +// +// Deprecated: use [container.ResizeOptions]. +type ResizeOptions = container.ResizeOptions + +// ContainerAttachOptions holds parameters to attach to a container. +// +// Deprecated: use [container.AttachOptions]. +type ContainerAttachOptions = container.AttachOptions + +// ContainerCommitOptions holds parameters to commit changes into a container. +// +// Deprecated: use [container.CommitOptions]. +type ContainerCommitOptions = container.CommitOptions + +// ContainerListOptions holds parameters to list containers with. +// +// Deprecated: use [container.ListOptions]. +type ContainerListOptions = container.ListOptions + +// ContainerLogsOptions holds parameters to filter logs with. 
+// +// Deprecated: use [container.LogsOptions]. +type ContainerLogsOptions = container.LogsOptions + +// ContainerRemoveOptions holds parameters to remove containers. +// +// Deprecated: use [container.RemoveOptions]. +type ContainerRemoveOptions = container.RemoveOptions + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// [system.SecurityOpt]. +// +// Deprecated: use [system.DecodeSecurityOptions]. +func DecodeSecurityOptions(opts []string) ([]system.SecurityOpt, error) { + return system.DecodeSecurityOptions(opts) +} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md index 992f18117..f8af3ab90 100644 --- a/vendor/github.com/docker/docker/client/README.md +++ b/vendor/github.com/docker/docker/client/README.md @@ -1,8 +1,10 @@ # Go client for the Docker Engine API -The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. +The `docker` command uses this package to communicate with the daemon. It can +also be used by your own Go applications to do anything the command-line +interface does – running containers, pulling images, managing swarms, etc. -For example, to list running containers (the equivalent of `docker ps`): +For example, to list all containers (the equivalent of `docker ps --all`): ```go package main @@ -11,25 +13,26 @@ import ( "context" "fmt" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" ) func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) + apiClient, err := client.NewClientWithOpts(client.FromEnv) if err != nil { panic(err) } + defer apiClient.Close() - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + containers, err := apiClient.ContainerList(context.Background(), container.ListOptions{All: true}) if err != nil { panic(err) } - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) + for _, ctr := range containers { + fmt.Printf("%s %s (status: %s)\n", ctr.ID, ctr.Image, ctr.Status) } } ``` -[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) +[Full documentation is available on pkg.go.dev.](https://pkg.go.dev/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go index 2b6606236..1a830f413 100644 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -13,7 +13,7 @@ import ( // BuildCachePrune requests the daemon to delete unused cache data func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { - if err := cli.NewVersionError("1.31", "build prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go index 921024fe4..9746d288d 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -3,11 +3,11 @@ package client // import "github.com/docker/docker/client" import ( "context" - 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) // CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error { resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go index 54f55fa76..b968c2b23 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -4,11 +4,11 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) // CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error { query := url.Values{} if options.CheckpointDir != "" { query.Set("dir", options.CheckpointDir) diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 39cfb959f..8feb1f3f7 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -5,12 +5,12 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) // CheckpointList returns the checkpoints of the given container in the docker host -func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { - var checkpoints []types.Checkpoint +func (cli *Client) CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) { + var checkpoints []checkpoint.Summary query := url.Values{} if options.CheckpointDir != "" { diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 54fa36cca..0b496b0fa 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -19,7 +19,7 @@ For example, to list running containers (the equivalent of "docker ps"): "context" "fmt" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" ) @@ -29,13 +29,13 @@ For example, to list running containers (the equivalent of "docker ps"): panic(err) } - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + containers, err := cli.ContainerList(context.Background(), container.ListOptions{}) if err != nil { panic(err) } - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) + for _, ctr := range containers { + fmt.Printf("%s %s\n", ctr.ID, ctr.Image) } } */ @@ -43,17 +43,21 @@ package client // import "github.com/docker/docker/client" import ( "context" + "crypto/tls" "net" "net/http" "net/url" "path" "strings" + "time" "github.com/docker/docker/api" 
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" "github.com/pkg/errors" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel/trace" ) // DummyHost is a hostname used for local communication. @@ -86,8 +90,12 @@ import ( // [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569 const DummyHost = "api.moby.localhost" -// ErrRedirect is the error returned by checkRedirect when the request is non-GET. -var ErrRedirect = errors.New("unexpected redirect in response") +// fallbackAPIVersion is the version to fallback to if API-version negotiation +// fails. This version is the highest version of the API before API-version +// negotiation was introduced. If negotiation fails (or no API version was +// included in the API response), we assume the API server uses the most +// recent version before negotiation was introduced. +const fallbackAPIVersion = "1.24" // Client is the API client that performs all operations // against a docker server. @@ -106,7 +114,12 @@ type Client struct { client *http.Client // version of the server to talk to. version string - // custom http headers configured by users. + // userAgent is the User-Agent header to use for HTTP requests. It takes + // precedence over User-Agent headers set in customHTTPHeaders, and other + // header variables. When set to an empty string, the User-Agent header + // is removed, and no header is sent. + userAgent *string + // custom HTTP headers configured by users. customHTTPHeaders map[string]string // manualOverride is set to true when the version was set by users. manualOverride bool @@ -119,22 +132,33 @@ type Client struct { // negotiated indicates that API version negotiation took place negotiated bool + + tp trace.TracerProvider + + // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections). + // Store the original transport as the http.Client transport will be wrapped with tracing libs. + baseTransport *http.Transport } -// CheckRedirect specifies the policy for dealing with redirect responses: -// If the request is non-GET return ErrRedirect, otherwise use the last response. +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// CheckRedirect specifies the policy for dealing with redirect responses. It +// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for +// non-GET requests. It returns an [ErrRedirect] for non-GET request, otherwise +// returns a [http.ErrUseLastResponse], which is special-cased by http.Client +// to use the last response. // -// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) -// in the client. The Docker client (and by extension docker API client) can be -// made to send a request like POST /containers//start where what would normally -// be in the name section of the URL is empty. This triggers an HTTP 301 from -// the daemon. +// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308) +// in the client. The client (and by extension API client) can be made to send +// a request like "POST /containers//start" where what would normally be in the +// name section of the URL is empty. This triggers an HTTP 301 from the daemon. 
// -// In go 1.8 this 301 will be converted to a GET request, and ends up getting +// In go 1.8 this 301 is converted to a GET request, and ends up getting // a 404 from the daemon. This behavior change manifests in the client in that // before, the 301 was not followed and the client did not generate an error, -// but now results in a message like Error response from daemon: page not found. -func CheckRedirect(req *http.Request, via []*http.Request) error { +// but now results in a message like "Error response from daemon: page not found". +func CheckRedirect(_ *http.Request, via []*http.Request) error { if via[0].Method == http.MethodGet { return http.ErrUseLastResponse } @@ -145,11 +169,11 @@ func CheckRedirect(req *http.Request, via []*http.Request) error { // default API host and version. It also initializes the custom HTTP headers to // add to each request. // -// It takes an optional list of Opt functional arguments, which are applied in +// It takes an optional list of [Opt] functional arguments, which are applied in // the order they're provided, which allows modifying the defaults when creating // the client. For example, the following initializes a client that configures -// itself with values from environment variables (client.FromEnv), and has -// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()). +// itself with values from environment variables ([FromEnv]), and has automatic +// API version negotiation enabled ([WithAPIVersionNegotiation]). // // cli, err := client.NewClientWithOpts( // client.FromEnv, @@ -179,23 +203,43 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { } } + if tr, ok := c.client.Transport.(*http.Transport); ok { + // Store the base transport before we wrap it in tracing libs below + // This is used, as an example, to close idle connections when the client is closed + c.baseTransport = tr + } + if c.scheme == "" { - c.scheme = "http" - - tlsConfig := resolveTLSConfig(c.client.Transport) - if tlsConfig != nil { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. 
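With the refactored constructor above, the [Opt] functions are applied in order and the HTTP transport is wrapped in an OpenTelemetry round-tripper. A minimal caller-side sketch, assuming a reachable daemon (the printed labels are illustrative only):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// FromEnv reads DOCKER_HOST and friends; WithAPIVersionNegotiation
	// enables the lazy negotiation described in this diff.
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close() // closes idle connections on the stored base transport

	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("negotiated:", cli.ClientVersion(), "daemon reports:", ping.APIVersion)
}
```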
+ if c.tlsConfig() != nil { c.scheme = "https" + } else { + c.scheme = "http" } } + c.client.Transport = otelhttp.NewTransport( + c.client.Transport, + otelhttp.WithTracerProvider(c.tp), + otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { + return req.Method + " " + req.URL.Path + }), + ) + return c, nil } +func (cli *Client) tlsConfig() *tls.Config { + if cli.baseTransport == nil { + return nil + } + return cli.baseTransport.TLSClientConfig +} + func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { transport := &http.Transport{} err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) @@ -210,19 +254,28 @@ func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { // Close the transport used by the client func (cli *Client) Close() error { - if t, ok := cli.client.Transport.(*http.Transport); ok { - t.CloseIdleConnections() + if cli.baseTransport != nil { + cli.baseTransport.CloseIdleConnections() + return nil } return nil } -// getAPIPath returns the versioned request path to call the api. -// It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { - var apiPath string +// checkVersion manually triggers API version negotiation (if configured). +// This allows for version-dependent code to use the same version as will +// be negotiated when making the actual requests, and for which cases +// we cannot do the negotiation lazily. +func (cli *Client) checkVersion(ctx context.Context) { if cli.negotiateVersion && !cli.negotiated { cli.NegotiateAPIVersion(ctx) } +} + +// getAPIPath returns the versioned request path to call the API. +// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { + var apiPath string + cli.checkVersion(ctx) if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") apiPath = path.Join(cli.basePath, "/v"+v, p) @@ -244,8 +297,8 @@ func (cli *Client) ClientVersion() string { // by the client, it uses the client's maximum version. // // If a manual override is in place, either through the "DOCKER_API_VERSION" -// (EnvOverrideAPIVersion) environment variable, or if the client is initialized -// with a fixed version (WithVersion(xx)), no negotiation is performed. +// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized +// with a fixed version ([WithVersion]), no negotiation is performed. // // If the API server's ping response does not contain an API version, or if the // client did not get a successful ping response, it assumes it is connected with @@ -265,8 +318,8 @@ func (cli *Client) NegotiateAPIVersion(ctx context.Context) { // version. // // If a manual override is in place, either through the "DOCKER_API_VERSION" -// (EnvOverrideAPIVersion) environment variable, or if the client is initialized -// with a fixed version (WithVersion(xx)), no negotiation is performed. +// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized +// with a fixed version ([WithVersion]), no negotiation is performed. 
// // If the API server's ping response does not contain an API version, we assume // we are connected with an old daemon without API version negotiation support, @@ -283,7 +336,7 @@ func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { // default to the latest version before versioning headers existed if pingResponse.APIVersion == "" { - pingResponse.APIVersion = "1.24" + pingResponse.APIVersion = fallbackAPIVersion } // if the client is not initialized with a version, start with the latest supported version @@ -338,17 +391,40 @@ func ParseHostURL(host string) (*url.URL, error) { }, nil } +func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) { + if cli.baseTransport == nil || cli.baseTransport.DialContext == nil { + return nil + } + + if cli.baseTransport.TLSClientConfig != nil { + // When using a tls config we don't use the configured dialer but instead a fallback dialer... + // Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn + // I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit. + return nil + } + return cli.baseTransport.DialContext +} + // Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, -// that can be used for proxying the daemon connection. +// that can be used for proxying the daemon connection. It is used by +// ["docker dial-stdio"]. // -// Used by `docker dial-stdio` (docker/cli#889). +// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014 func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { return func(ctx context.Context) (net.Conn, error) { - if transport, ok := cli.client.Transport.(*http.Transport); ok { - if transport.DialContext != nil && transport.TLSClientConfig == nil { - return transport.DialContext(ctx, cli.proto, cli.addr) + if dialFn := cli.dialerFromTransport(); dialFn != nil { + return dialFn(ctx, cli.proto, cli.addr) + } + switch cli.proto { + case "unix": + return net.Dial(cli.proto, cli.addr) + case "npipe": + return sockets.DialPipe(cli.addr, 32*time.Second) + default: + if tlsConfig := cli.tlsConfig(); tlsConfig != nil { + return tls.Dial(cli.proto, cli.addr, tlsConfig) } + return net.Dial(cli.proto, cli.addr) } - return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) } } diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 319b738d3..9fe78ea43 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package client // import "github.com/docker/docker/client" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go index f6b1881fc..3deb4a8e2 100644 --- a/vendor/github.com/docker/docker/client/config_create.go +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -11,7 +11,7 @@ import ( // ConfigCreate creates a new config. 
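The negotiation path above falls back to fallbackAPIVersion ("1.24") when the ping response carries no API version. A sketch of triggering negotiation eagerly rather than on the first request; the deliberate error-ignore mirrors how NegotiateAPIVersion itself proceeds on ping failure:

```go
import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func negotiateEagerly(ctx context.Context, cli *client.Client) {
	// A failed ping yields an empty APIVersion, which exercises the
	// fallbackAPIVersion path shown in this diff.
	ping, _ := cli.Ping(ctx)
	cli.NegotiateAPIVersionPing(ping)
	fmt.Println("using API version:", cli.ClientVersion())
}
```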
func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { var response types.ConfigCreateResponse - if err := cli.NewVersionError("1.30", "config create"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil { return response, err } resp, err := cli.post(ctx, "/configs/create", nil, config, nil) diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go index 9be7882c3..2c6c7cb36 100644 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -14,7 +14,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C if id == "" { return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} } - if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config inspect"); err != nil { return swarm.Config{}, nil, err } resp, err := cli.get(ctx, "/configs/"+id, nil, nil) diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go index 565acc6e2..14dd3813e 100644 --- a/vendor/github.com/docker/docker/client/config_list.go +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -12,7 +12,7 @@ import ( // ConfigList returns the list of configs. func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { - if err := cli.NewVersionError("1.30", "config list"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil { return nil, err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go index 24b94e9c1..d05b0113a 100644 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -4,7 +4,7 @@ import "context" // ConfigRemove removes a config. 
func (cli *Client) ConfigRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.30", "config remove"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config remove"); err != nil { return err } resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go index 1ac298543..6995861df 100644 --- a/vendor/github.com/docker/docker/client/config_update.go +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -9,7 +9,7 @@ import ( // ConfigUpdate attempts to update a config func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { - if err := cli.NewVersionError("1.30", "config update"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "config update"); err != nil { return err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index ba92117d3..6a32e5f66 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -2,9 +2,11 @@ package client // import "github.com/docker/docker/client" import ( "context" + "net/http" "net/url" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerAttach attaches a connection to a container in the server. @@ -31,7 +33,7 @@ import ( // // You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this // stream. -func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { +func (cli *Client) ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) { query := url.Values{} if options.Stream { query.Set("stream", "1") @@ -52,8 +54,7 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option query.Set("logs", "1") } - headers := map[string][]string{ + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, http.Header{ "Content-Type": {"text/plain"}, - } - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) + }) } diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index cd7f76346..26b3f0915 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -6,12 +6,13 @@ import ( "errors" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerCommit applies changes to a container and creates a new tagged image. 
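ContainerAttach now takes container.AttachOptions and posts the hijack with an http.Header literal. A usage sketch, assuming the container ID exists; the stdcopy demultiplexing step applies only to non-TTY containers:

```go
import (
	"context"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func attachAndStream(ctx context.Context, cli *client.Client, id string) error {
	resp, err := cli.ContainerAttach(ctx, id, container.AttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		return err
	}
	defer resp.Close()

	// For non-TTY containers the hijacked stream is multiplexed;
	// StdCopy splits stdout and stderr back apart.
	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)
	return err
}
```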
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { +func (cli *Client) ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) { var repository, tag string if options.Reference != "" { ref, err := reference.ParseNormalizedNamed(options.Reference) diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index 193a2bb56..409f5b492 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -23,10 +23,23 @@ type configWrapper struct { func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) { var response container.CreateResponse - if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + + if err := cli.NewVersionError(ctx, "1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + if err := cli.NewVersionError(ctx, "1.41", "specify container image platform"); platform != nil && err != nil { return response, err } - if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { + if err := cli.NewVersionError(ctx, "1.44", "specify health-check start interval"); config != nil && config.Healthcheck != nil && config.Healthcheck.StartInterval != 0 && err != nil { + return response, err + } + if err := cli.NewVersionError(ctx, "1.44", "specify mac-address per network"); hasEndpointSpecificMacAddress(networkingConfig) && err != nil { return response, err } @@ -45,6 +58,11 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config } } + // Since API 1.44, the container-wide MacAddress is deprecated and will trigger a WARNING if it's specified. + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.44") { + config.MacAddress = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44. + } + query := url.Values{} if p := formatPlatform(platform); p != "" { query.Set("platform", p) @@ -81,3 +99,16 @@ func formatPlatform(platform *ocispec.Platform) string { } return path.Join(platform.OS, platform.Architecture, platform.Variant) } + +// hasEndpointSpecificMacAddress checks whether one of the endpoint in networkingConfig has a MacAddress defined. 
+func hasEndpointSpecificMacAddress(networkingConfig *network.NetworkingConfig) bool { + if networkingConfig == nil { + return false + } + for _, endpoint := range networkingConfig.EndpointsConfig { + if endpoint.MacAddress != "" { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 6a2cb006f..3fff0c828 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" @@ -12,7 +13,14 @@ import ( func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { var response types.IDResponse - if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + + if err := cli.NewVersionError(ctx, "1.25", "env"); len(config.Env) != 0 && err != nil { return response, err } if versions.LessThan(cli.ClientVersion(), "1.42") { @@ -46,10 +54,9 @@ func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, confi if versions.LessThan(cli.ClientVersion(), "1.42") { config.ConsoleSize = nil } - headers := map[string][]string{ + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, http.Header{ "Content-Type": {"application/json"}, - } - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) + }) } // ContainerExecInspect returns information about a specific exec process on the docker host. diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index bd491b3db..782e1b3c6 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -7,11 +7,12 @@ import ( "strconv" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" ) // ContainerList returns the list of containers in the docker host. 
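The helper above is what lets ContainerCreate reject per-endpoint MAC addresses on daemons older than API v1.44. A caller-side sketch; the network name "mynet", the MAC, and the "alpine" image are placeholders:

```go
import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func createWithMac(ctx context.Context, cli *client.Client) (string, error) {
	netCfg := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			"mynet": {MacAddress: "02:42:ac:11:00:02"}, // placeholder network + MAC
		},
	}
	// ContainerCreate negotiates the API version first and returns a
	// version error on daemons < 1.44, via hasEndpointSpecificMacAddress.
	resp, err := cli.ContainerCreate(ctx,
		&container.Config{Image: "alpine"}, // placeholder image
		&container.HostConfig{}, netCfg, nil, "demo")
	if err != nil {
		return "", err
	}
	return resp.ID, nil
}
```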
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { +func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) { query := url.Values{} if options.All { @@ -37,7 +38,6 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis if options.Filters.Len() > 0 { //nolint:staticcheck // ignore SA1019 for old code filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 9bdf2b0fa..61197d840 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -6,7 +6,7 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" timetypes "github.com/docker/docker/api/types/time" "github.com/pkg/errors" ) @@ -33,7 +33,7 @@ import ( // // You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this // stream. -func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *Client) ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index 04383deaa..ca5092384 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { var report types.ContainersPruneReport - if err := cli.NewVersionError("1.25", "container prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index c21de609b..39f7b106a 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -4,11 +4,11 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerRemove kills and removes a container from the docker host. -func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { +func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error { query := url.Values{} if options.RemoveVolumes { query.Set("v", "1") diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go index a9d4c0c79..5cfd01d47 100644 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -5,16 +5,16 @@ import ( "net/url" "strconv" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerResize changes the size of the tty for a container. 
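ContainerList now takes container.ListOptions instead of types.ContainerListOptions. A sketch listing exited containers through the filters package:

```go
import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func listExited(ctx context.Context, cli *client.Client) error {
	f := filters.NewArgs()
	f.Add("status", "exited")
	ctrs, err := cli.ContainerList(ctx, container.ListOptions{All: true, Filters: f})
	if err != nil {
		return err
	}
	for _, c := range ctrs {
		fmt.Println(c.ID[:12], c.Image, c.Status)
	}
	return nil
}
```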
-func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options container.ResizeOptions) error { return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) } // ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { +func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error { return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) } diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 1e0ad9998..825d3e4e9 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -17,8 +17,16 @@ func (cli *Client) ContainerRestart(ctx context.Context, containerID string, opt if options.Timeout != nil { query.Set("t", strconv.Itoa(*options.Timeout)) } - if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) + if options.Signal != "" { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + if versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } } resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go index c2e0b15dc..33ba85f24 100644 --- a/vendor/github.com/docker/docker/client/container_start.go +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -4,11 +4,11 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error { query := url.Values{} if len(options.CheckpointID) != 0 { query.Set("checkpoint", options.CheckpointID) diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 0a6488dde..3fabb75f3 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -21,8 +21,10 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea return types.ContainerStats{}, err } - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err + return types.ContainerStats{ + Body: resp.body, + OSType: getDockerOS(resp.header.Get("Server")), + }, nil } // ContainerStatsOneShot gets a single stat entry from a container. 
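ContainerRestart and ContainerStop now trigger version negotiation before deciding whether to send the signal parameter (API >= 1.42). Caller-side sketch:

```go
import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func gracefulStop(ctx context.Context, cli *client.Client, id string) error {
	timeout := 10 // seconds before the daemon escalates to SIGKILL
	// On daemons < API 1.42 the Signal field is silently dropped,
	// matching the version check added in this diff.
	return cli.ContainerStop(ctx, id, container.StopOptions{
		Signal:  "SIGTERM",
		Timeout: &timeout,
	})
}
```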
@@ -37,6 +39,8 @@ func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string return types.ContainerStats{}, err } - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err + return types.ContainerStats{ + Body: resp.body, + OSType: getDockerOS(resp.header.Get("Server")), + }, nil } diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index 2a43ce227..ac0cab69d 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -21,8 +21,16 @@ func (cli *Client) ContainerStop(ctx context.Context, containerID string, option if options.Timeout != nil { query.Set("t", strconv.Itoa(*options.Timeout)) } - if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) + if options.Signal != "" { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + if versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } } resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 2375eb1e8..b8d3bdef0 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -30,6 +30,12 @@ const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ // synchronize ContainerWait with other calls, such as specifying a // "next-exit" condition before issuing a ContainerStart request. func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) if versions.LessThan(cli.ClientVersion(), "1.30") { return cli.legacyContainerWait(ctx, containerID) } @@ -66,8 +72,12 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit // // If there's a JSON parsing error, read the real error message // off the body and send it to the client. 
- _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) - errC <- errors.New(responseText.String()) + if errors.As(err, new(*json.SyntaxError)) { + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) + } else { + errC <- err + } return } diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index efab066d3..68ef31b78 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "net/url" "github.com/docker/docker/api/types/registry" @@ -16,13 +17,13 @@ func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegist return distributionInspect, objectNotFoundError{object: "distribution", id: image} } - if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil { return distributionInspect, err } - var headers map[string][]string + var headers http.Header if encodedRegistryAuth != "" { - headers = map[string][]string{ + headers = http.Header{ registry.AuthHeader: {encodedRegistryAuth}, } } diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 6878144c4..4b96b0208 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" import ( + "context" "fmt" "github.com/docker/docker/api/types/versions" @@ -31,20 +32,10 @@ func ErrorConnectionFailed(host string) error { return errConnectionFailed{host: host} } -// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility -type notFound interface { - error - NotFound() bool -} - // IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. +// by the API when some object is not found. It is an alias for [errdefs.IsNotFound]. func IsErrNotFound(err error) bool { - if errdefs.IsNotFound(err) { - return true - } - var e notFound - return errors.As(err, &e) + return errdefs.IsNotFound(err) } type objectNotFoundError struct { @@ -58,9 +49,18 @@ func (e objectNotFoundError) Error() string { return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) } -// NewVersionError returns an error if the APIVersion required -// if less than the current supported version -func (cli *Client) NewVersionError(APIrequired, feature string) error { +// NewVersionError returns an error if the APIVersion required is less than the +// current supported version. +// +// It performs API-version negotiation if the Client is configured with this +// option, otherwise it assumes the latest API version is used. +func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature string) error { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
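ContainerWait negotiates up front because the legacy (pre-1.30) wait endpoint differs, and it now forwards non-JSON decode errors instead of swallowing them. A usage sketch registering a "next-exit" condition before starting the container:

```go
import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func waitForExit(ctx context.Context, cli *client.Client, id string) error {
	// Register the wait before starting, so the exit cannot be missed.
	waitC, errC := cli.ContainerWait(ctx, id, container.WaitConditionNextExit)
	if err := cli.ContainerStart(ctx, id, container.StartOptions{}); err != nil {
		return err
	}
	select {
	case res := <-waitC:
		fmt.Println("exit code:", res.StatusCode)
		return nil
	case err := <-errC:
		return err
	}
}
```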
+ cli.checkVersion(ctx) if cli.version != "" && versions.LessThan(cli.version, APIrequired) { return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) } diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 7e84865f6..839d4c5cd 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -3,18 +3,16 @@ package client // import "github.com/docker/docker/client" import ( "bufio" "context" - "crypto/tls" "fmt" "net" "net/http" - "net/http/httputil" "net/url" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/sockets" "github.com/pkg/errors" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" ) // postHijacked sends a POST request and hijacks the connection. @@ -23,11 +21,11 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu if err != nil { return types.HijackedResponse{}, err } - req, err := cli.buildRequest(http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) + req, err := cli.buildRequest(ctx, http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) if err != nil { return types.HijackedResponse{}, err } - conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") + conn, mediaType, err := cli.setupHijackConn(req, "tcp") if err != nil { return types.HijackedResponse{}, err } @@ -37,29 +35,18 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu // DialHijack returns a hijacked connection with negotiated protocol proto. func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequest(http.MethodPost, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) if err != nil { return nil, err } req = cli.addHeaders(req, meta) - conn, _, err := cli.setupHijackConn(ctx, req, proto) + conn, _, err := cli.setupHijackConn(req, proto) return conn, err } -// fallbackDial is used when WithDialer() was not called. -// See cli.Dialer(). -func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - return tls.Dial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} - -func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { +func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { + ctx := req.Context() req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) @@ -68,6 +55,11 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto if err != nil { return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. 
Is 'docker daemon' running on this host?") } + defer func() { + if retErr != nil { + conn.Close() + } + }() // When we set up a TCP connection for hijack, there could be long periods // of inactivity (a long running command with no output) that in certain @@ -79,35 +71,29 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto _ = tcpConn.SetKeepAlivePeriod(30 * time.Second) } - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() + hc := &hijackedConn{conn, bufio.NewReader(conn)} // Server hijacks the connection, error 'connection closed' expected - resp, err := clientconn.Do(req) - - //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons - if err != httputil.ErrPersistEOF { - if err != nil { - return nil, "", err - } - if resp.StatusCode != http.StatusSwitchingProtocols { - _ = resp.Body.Close() - return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) - } + resp, err := otelhttp.NewTransport(hc).RoundTrip(req) + if err != nil { + return nil, "", err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + _ = resp.Body.Close() + return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) } - c, br := clientconn.Hijack() - if br.Buffered() > 0 { + if hc.r.Buffered() > 0 { // If there is buffered content, wrap the connection. We return an // object that implements CloseWrite if the underlying connection // implements it. - if _, ok := c.(types.CloseWriter); ok { - c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + if _, ok := hc.Conn.(types.CloseWriter); ok { + conn = &hijackedConnCloseWriter{hc} } else { - c = &hijackedConn{c, br} + conn = hc } } else { - br.Reset(nil) + hc.r.Reset(nil) } var mediaType string @@ -116,7 +102,7 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto mediaType = resp.Header.Get("Content-Type") } - return c, mediaType, nil + return conn, mediaType, nil } // hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case @@ -128,6 +114,13 @@ type hijackedConn struct { r *bufio.Reader } +func (c *hijackedConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := req.Write(c.Conn); err != nil { + return nil, err + } + return http.ReadResponse(c.r, req) +} + func (c *hijackedConn) Read(b []byte) (int, error) { return c.r.Read(b) } diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index d16e1d8ea..d294ddc8b 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -18,18 +18,18 @@ import ( // The Body in the response implements an io.ReadCloser and it's up to the caller to // close it. 
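ImageBuild expects a tar stream as its build context and leaves closing the response body to the caller. A self-contained sketch with an in-memory single-file context; the image tag is a placeholder:

```go
import (
	"archive/tar"
	"bytes"
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func buildImage(ctx context.Context, cli *client.Client) error {
	// Assemble a minimal in-memory tar context holding only a Dockerfile.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	dockerfile := []byte("FROM alpine\nCMD [\"true\"]\n")
	if err := tw.WriteHeader(&tar.Header{
		Name: "Dockerfile",
		Size: int64(len(dockerfile)),
		Mode: 0o644,
	}); err != nil {
		return err
	}
	if _, err := tw.Write(dockerfile); err != nil {
		return err
	}
	if err := tw.Close(); err != nil {
		return err
	}

	resp, err := cli.ImageBuild(ctx, &buf, types.ImageBuildOptions{
		Tags: []string{"example:latest"}, // placeholder tag
	})
	if err != nil {
		return err
	}
	defer resp.Body.Close() // closing is the caller's job, per the doc above
	_, err = io.Copy(os.Stdout, resp.Body)
	return err
}
```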
func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := cli.imageBuildOptionsToQuery(options) + query, err := cli.imageBuildOptionsToQuery(ctx, options) if err != nil { return types.ImageBuildResponse{}, err } - headers := http.Header(make(map[string][]string)) buf, err := json.Marshal(options.AuthConfigs) if err != nil { return types.ImageBuildResponse{}, err } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers := http.Header{} + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) headers.Set("Content-Type", "application/x-tar") serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) @@ -37,15 +37,13 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio return types.ImageBuildResponse{}, err } - osType := getDockerOS(serverResp.header.Get("Server")) - return types.ImageBuildResponse{ Body: serverResp.body, - OSType: osType, + OSType: getDockerOS(serverResp.header.Get("Server")), }, nil } -func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { +func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.ImageBuildOptions) (url.Values, error) { query := url.Values{ "t": options.Tags, "securityopt": options.SecurityOpt, @@ -75,7 +73,7 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur } if options.Squash { - if err := cli.NewVersionError("1.25", "squash"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "squash"); err != nil { return query, err } query.Set("squash", "1") @@ -125,7 +123,7 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur query.Set("session", options.SessionID) } if options.Platform != "" { - if err := cli.NewVersionError("1.32", "platform"); err != nil { + if err := cli.NewVersionError(ctx, "1.32", "platform"); err != nil { return query, err } query.Set("platform", strings.ToLower(options.Platform)) diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index 6a9b708f7..29cd0b437 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -3,10 +3,11 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" "net/url" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" ) @@ -33,6 +34,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti } func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) + return cli.post(ctx, "/images/create", query, nil, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index c5de42cb7..cd376a14e 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -6,7 +6,7 @@ import ( "net/url" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" 
"github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index 950d51333..f3f2280e3 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -7,12 +7,20 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/versions" ) // ImageList returns a list of images in the docker host. -func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { - var images []types.ImageSummary +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + + var images []image.Summary query := url.Values{} optionFilters := options.Filters diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go index 91016e493..c825206ea 100644 --- a/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" "net/url" "github.com/docker/docker/api/types" @@ -17,8 +18,9 @@ func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) ( if quiet { v.Set("quiet", "1") } - headers := map[string][]string{"Content-Type": {"application/x-tar"}} - resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + resp, err := cli.postRaw(ctx, "/images/load", v, input, http.Header{ + "Content-Type": {"application/x-tar"}, + }) if err != nil { return types.ImageLoadResponse{}, err } diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 56af6d7f9..6b82d6ab6 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { var report types.ImagesPruneReport - if err := cli.NewVersionError("1.25", "image prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "image prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index a23975591..d92049d58 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -6,7 +6,7 @@ import ( "net/url" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/errdefs" ) diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index dd1b8f347..6839a89e0 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -4,9 +4,10 @@ import ( "context" "errors" "io" + "net/http" "net/url" - "github.com/docker/distribution/reference" + 
"github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" @@ -50,6 +51,7 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options types.Im } func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 6a9fb3f41..b936d2083 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -6,10 +6,11 @@ import ( "net/url" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" ) // ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) { query := url.Values{} if options.Force { @@ -19,7 +20,7 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type query.Set("noprune", "1") } - var dels []types.ImageDeleteResponseItem + var dels []image.DeleteResponse resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 5f0c49ed3..8971b139a 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "net/url" "strconv" @@ -48,6 +49,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I } func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.get(ctx, "/images/search", query, headers) + return cli.get(ctx, "/images/search", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go index 5652bfc25..ea6b4a1e6 100644 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -4,7 +4,7 @@ import ( "context" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/pkg/errors" ) diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go index c856704e2..cc3fcc467 100644 --- a/vendor/github.com/docker/docker/client/info.go +++ b/vendor/github.com/docker/docker/client/info.go @@ -6,12 +6,12 @@ import ( "fmt" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/system" ) // Info returns information about the docker server. 
-func (cli *Client) Info(ctx context.Context) (types.Info, error) { - var info types.Info +func (cli *Client) Info(ctx context.Context) (system.Info, error) { + var info system.Info serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) defer ensureReaderClosed(serverResp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 7993c5a48..302f5fb13 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/system" "github.com/docker/docker/api/types/volume" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -45,30 +46,30 @@ type CommonAPIClient interface { // ContainerAPIClient defines API client methods for the containers type ContainerAPIClient interface { - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx 
context.Context, container string, options types.ResizeOptions) error + ContainerResize(ctx context.Context, container string, options container.ResizeOptions) error ContainerRestart(ctx context.Context, container string, options container.StopOptions) error ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) - ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStart(ctx context.Context, container string, options container.StartOptions) error ContainerStop(ctx context.Context, container string, options container.StopOptions) error ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error @@ -93,11 +94,11 @@ type ImageAPIClient interface { ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error @@ -140,13 +141,13 @@ type PluginAPIClient interface { // ServiceAPIClient defines API client methods for the services type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) - ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options 
types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) } @@ -165,7 +166,7 @@ type SwarmAPIClient interface { // SystemAPIClient defines API client methods for the system type SystemAPIClient interface { Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) - Info(ctx context.Context) (types.Info, error) + Info(ctx context.Context) (system.Info, error) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) Ping(ctx context.Context) (types.Ping, error) diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go index 402ffb512..c585c1045 100644 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -3,7 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/checkpoint" ) type apiClientExperimental interface { @@ -12,7 +12,7 @@ type apiClientExperimental interface { // CheckpointAPIClient defines API client methods for the checkpoints type CheckpointAPIClient interface { - CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error - CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error - CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) + CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error + CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error + CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) } diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index 278d9383a..668e87d65 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -5,14 +5,26 @@ import ( "encoding/json" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" ) // NetworkCreate creates a new network in the docker host. func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + networkCreateRequest := types.NetworkCreateRequest{ NetworkCreate: options, Name: name, } + if versions.LessThan(cli.version, "1.44") { + networkCreateRequest.CheckDuplicate = true //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. 
+ } + var response types.NetworkCreateResponse serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go index cebb18821..7b5f831ef 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { var report types.NetworksPruneReport - if err := cli.NewVersionError("1.25", "network prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "network prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go index c212906bc..1a9e6bfb1 100644 --- a/vendor/github.com/docker/docker/client/node_list.go +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -16,7 +16,6 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) if options.Filters.Len() > 0 { filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index 099ad4184..ddb0ca399 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -11,25 +11,25 @@ import ( "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" + "go.opentelemetry.io/otel/trace" ) -// Opt is a configuration option to initialize a client +// Opt is a configuration option to initialize a [Client]. type Opt func(*Client) error -// FromEnv configures the client with values from environment variables. +// FromEnv configures the client with values from environment variables. It +// is the equivalent of using the [WithTLSClientConfigFromEnv], [WithHostFromEnv], +// and [WithVersionFromEnv] options. // // FromEnv uses the following environment variables: // -// DOCKER_HOST (EnvOverrideHost) to set the URL to the docker server. -// -// DOCKER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to -// use, leave empty for latest. -// -// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to -// load the TLS certificates (ca.pem, cert.pem, key.pem). -// -// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by -// default). +// - DOCKER_HOST ([EnvOverrideHost]) to set the URL to the docker server. +// - DOCKER_API_VERSION ([EnvOverrideAPIVersion]) to set the version of the +// API to use, leave empty for latest. +// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from +// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem'). +// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification +// (off by default). func FromEnv(c *Client) error { ops := []Opt{ WithTLSClientConfigFromEnv(), @@ -45,7 +45,8 @@ func FromEnv(c *Client) error { } // WithDialContext applies the dialer to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. +// used to set the Timeout and KeepAlive settings of the client. It returns +// an error if the client does not have a [http.Transport] configured. 
func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { return func(c *Client) error { if transport, ok := c.client.Transport.(*http.Transport); ok { @@ -75,7 +76,7 @@ func WithHost(host string) Opt { } // WithHostFromEnv overrides the client host with the host specified in the -// DOCKER_HOST (EnvOverrideHost) environment variable. If DOCKER_HOST is not set, +// DOCKER_HOST ([EnvOverrideHost]) environment variable. If DOCKER_HOST is not set, // or set to an empty value, the host is not modified. func WithHostFromEnv() Opt { return func(c *Client) error { @@ -86,7 +87,7 @@ func WithHostFromEnv() Opt { } } -// WithHTTPClient overrides the client http client with the specified one +// WithHTTPClient overrides the client's HTTP client with the specified one. func WithHTTPClient(client *http.Client) Opt { return func(c *Client) error { if client != nil { @@ -96,7 +97,7 @@ func WithHTTPClient(client *http.Client) Opt { } } -// WithTimeout configures the time limit for requests made by the HTTP client +// WithTimeout configures the time limit for requests made by the HTTP client. func WithTimeout(timeout time.Duration) Opt { return func(c *Client) error { c.client.Timeout = timeout @@ -104,7 +105,19 @@ func WithTimeout(timeout time.Duration) Opt { } } -// WithHTTPHeaders overrides the client default http headers +// WithUserAgent configures the User-Agent header to use for HTTP requests. +// It overrides any User-Agent set in headers. When set to an empty string, +// the User-Agent header is removed, and no header is sent. +func WithUserAgent(ua string) Opt { + return func(c *Client) error { + c.userAgent = &ua + return nil + } +} + +// WithHTTPHeaders appends custom HTTP headers to the client's default headers. +// It does not allow for built-in headers (such as "User-Agent", if set) to +// be overridden. Also see [WithUserAgent]. func WithHTTPHeaders(headers map[string]string) Opt { return func(c *Client) error { c.customHTTPHeaders = headers @@ -112,7 +125,7 @@ func WithHTTPHeaders(headers map[string]string) Opt { } } -// WithScheme overrides the client scheme with the specified one +// WithScheme overrides the client scheme with the specified one. func WithScheme(scheme string) Opt { return func(c *Client) error { c.scheme = scheme @@ -120,51 +133,50 @@ func WithScheme(scheme string) Opt { } } -// WithTLSClientConfig applies a tls config to the client transport. +// WithTLSClientConfig applies a TLS config to the client transport. func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { return func(c *Client) error { - opts := tlsconfig.Options{ + transport, ok := c.client.Transport.(*http.Transport) + if !ok { + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } + config, err := tlsconfig.Client(tlsconfig.Options{ CAFile: cacertPath, CertFile: certPath, KeyFile: keyPath, ExclusiveRootPools: true, - } - config, err := tlsconfig.Client(opts) + }) if err != nil { return errors.Wrap(err, "failed to create tls config") } - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.TLSClientConfig = config - return nil - } - return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + transport.TLSClientConfig = config + return nil } } // WithTLSClientConfigFromEnv configures the client's TLS settings with the -// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables. 
-// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified. +// settings in the DOCKER_CERT_PATH ([EnvOverrideCertPath]) and DOCKER_TLS_VERIFY +// ([EnvTLSVerify]) environment variables. If DOCKER_CERT_PATH is not set or empty, +// TLS configuration is not modified. // // WithTLSClientConfigFromEnv uses the following environment variables: // -// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to -// load the TLS certificates (ca.pem, cert.pem, key.pem). -// -// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by -// default). +// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from +// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem"). +// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification +// (off by default). func WithTLSClientConfigFromEnv() Opt { return func(c *Client) error { dockerCertPath := os.Getenv(EnvOverrideCertPath) if dockerCertPath == "" { return nil } - options := tlsconfig.Options{ + tlsc, err := tlsconfig.Client(tlsconfig.Options{ CAFile: filepath.Join(dockerCertPath, "ca.pem"), CertFile: filepath.Join(dockerCertPath, "cert.pem"), KeyFile: filepath.Join(dockerCertPath, "key.pem"), InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", - } - tlsc, err := tlsconfig.Client(options) + }) if err != nil { return err } @@ -178,7 +190,8 @@ func WithTLSClientConfigFromEnv() Opt { } // WithVersion overrides the client version with the specified one. If an empty -// version is specified, the value will be ignored to allow version negotiation. +// version is provided, the value is ignored to allow version negotiation +// (see [WithAPIVersionNegotiation]). func WithVersion(version string) Opt { return func(c *Client) error { if version != "" { @@ -190,8 +203,9 @@ func WithVersion(version string) Opt { } // WithVersionFromEnv overrides the client version with the version specified in -// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set, -// the version is not modified. +// the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable. +// If DOCKER_API_VERSION is not set, or set to an empty value, the version +// is not modified. func WithVersionFromEnv() Opt { return func(c *Client) error { return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) @@ -201,10 +215,19 @@ func WithVersionFromEnv() Opt { // WithAPIVersionNegotiation enables automatic API version negotiation for the client. // With this option enabled, the client automatically negotiates the API version // to use when making requests. API version negotiation is performed on the first -// request; subsequent requests will not re-negotiate. +// request; subsequent requests do not re-negotiate. func WithAPIVersionNegotiation() Opt { return func(c *Client) error { c.negotiateVersion = true return nil } } + +// WithTraceProvider sets the trace provider for the client. +// If this is not set then the global trace provider will be used. 
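The reworked options above compose the same way as before; a minimal sketch wiring the new WithUserAgent together with env-based configuration and lazy version negotiation (the user-agent string is a placeholder, and a reachable daemon is assumed for the Ping call):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Compose env-based config with lazy version negotiation and the new
	// WithUserAgent option.
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
		client.WithUserAgent("example-client/0.1"),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Negotiation happens on the first request.
	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("negotiated API version:", ping.APIVersion)
}
```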
+func WithTraceProvider(provider trace.TracerProvider) Opt { + return func(c *Client) error { + c.tp = provider + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index 347ae71e0..dfd1042fa 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -21,11 +21,11 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() // because ping requests are used during API version negotiation, so we want // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping - req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } - serverResp, err := cli.doRequest(ctx, req) + serverResp, err := cli.doRequest(req) if err == nil { defer ensureReaderClosed(serverResp) switch serverResp.statusCode { @@ -37,11 +37,9 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { return ping, err } - req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) - if err != nil { - return ping, err - } - serverResp, err = cli.doRequest(ctx, req) + // HEAD failed; fallback to GET. + req.Method = http.MethodGet + serverResp, err = cli.doRequest(req) defer ensureReaderClosed(serverResp) if err != nil { return ping, err diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index 3a740ec4f..69184619a 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -4,9 +4,10 @@ import ( "context" "encoding/json" "io" + "net/http" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" @@ -68,13 +69,15 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types } func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.get(ctx, "/plugins/privileges", query, headers) + return cli.get(ctx, "/plugins/privileges", query, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/plugins/pull", query, privileges, headers) + return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go index 18f9754c4..8f68a86ee 100644 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -3,14 +3,16 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" 
"github.com/docker/docker/api/types/registry" ) // PluginPush pushes a plugin to a registry func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ + registry.AuthHeader: {registryAuth}, + }) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go index 995d1fd2c..5cade450f 100644 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -3,9 +3,10 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" + "net/http" "net/url" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/pkg/errors" @@ -13,7 +14,7 @@ import ( // PluginUpgrade upgrades a plugin func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + if err := cli.NewVersionError(ctx, "1.26", "plugin upgrade"); err != nil { return nil, err } query := url.Values{} @@ -35,6 +36,7 @@ func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types } func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { - headers := map[string][]string{registry.AuthHeader: {registryAuth}} - return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{ + registry.AuthHeader: {registryAuth}, + }) } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index bcedcf3bd..efe07bb9e 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "os" + "reflect" "strings" "github.com/docker/docker/api/types" @@ -27,17 +28,17 @@ type serverResponse struct { } // head sends an http request to the docker API using the method HEAD. -func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) } // get sends an http request to the docker API using the method GET with a specific Go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) } // post sends an http request to the docker API using the method POST with a specific Go context. 
-func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { body, headers, err := encodeBody(obj, headers) if err != nil { return serverResponse{}, err @@ -45,34 +46,44 @@ func (cli *Client) post(ctx context.Context, path string, query url.Values, obj return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { body, headers, err := encodeBody(obj, headers) if err != nil { return serverResponse{}, err } - return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) + return cli.putRaw(ctx, path, query, body, headers) } // putRaw sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { + // PUT requests are expected to always have a body (apparently) + // so explicitly pass an empty body to sendRequest to signal that + // it should set the Content-Type header if not already present. + if body == nil { + body = http.NoBody + } return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) } // delete sends an http request to the docker API using the method DELETE. -func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) } -type headers map[string][]string - -func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { +func encodeBody(obj interface{}, headers http.Header) (io.Reader, http.Header, error) { if obj == nil { return nil, headers, nil } + // encoding/json encodes a nil pointer as the JSON document `null`, + // irrespective of whether the type implements json.Marshaler or encoding.TextMarshaler. + // That is almost certainly not what the caller intended as the request body. 
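A standalone demonstration of the behavior the following nil-pointer guard avoids: encoding/json serializes a typed nil pointer as the JSON document `null`, which is almost never the request body a caller intended.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type spec struct {
	Name string `json:"name"`
}

func main() {
	var s *spec // typed nil pointer
	b, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // prints: null
}
```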
+ if reflect.TypeOf(obj).Kind() == reflect.Ptr && reflect.ValueOf(obj).IsNil() { + return nil, headers, nil + } body, err := encodeData(obj) if err != nil { @@ -85,13 +96,8 @@ func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { return body, headers, nil } -func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { - expectedPayload := (method == http.MethodPost || method == http.MethodPut) - if expectedPayload && body == nil { - body = bytes.NewReader([]byte{}) - } - - req, err := http.NewRequest(method, path, body) +func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, method, path, body) if err != nil { return nil, err } @@ -104,19 +110,19 @@ func (cli *Client) buildRequest(method, path string, body io.Reader, headers hea req.Host = DummyHost } - if expectedPayload && req.Header.Get("Content-Type") == "" { + if body != nil && req.Header.Get("Content-Type") == "" { req.Header.Set("Content-Type", "text/plain") } return req, nil } -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { - req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { + req, err := cli.buildRequest(ctx, method, cli.getAPIPath(ctx, path, query), body, headers) if err != nil { return serverResponse{}, err } - resp, err := cli.doRequest(ctx, req) + resp, err := cli.doRequest(req) switch { case errors.Is(err, context.Canceled): return serverResponse{}, errdefs.Cancelled(err) @@ -128,10 +134,9 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u return resp, errdefs.FromStatusCode(err, resp.statusCode) } -func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { +func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - req = req.WithContext(ctx) resp, err := cli.client.Do(req) if err != nil { if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { @@ -148,19 +153,19 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp return serverResp, err } - if nErr, ok := err.(*url.Error); ok { - if nErr, ok := nErr.Err.(*net.OpError); ok { + if uErr, ok := err.(*url.Error); ok { + if nErr, ok := uErr.Err.(*net.OpError); ok { if os.IsPermission(nErr.Err) { return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) } } } - if err, ok := err.(net.Error); ok { - if err.Timeout() { + if nErr, ok := err.(net.Error); ok { + if nErr.Timeout() { return serverResp, ErrorConnectionFailed(cli.host) } - if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") { return serverResp, ErrorConnectionFailed(cli.host) } } @@ -221,26 +226,20 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", 
http.StatusText(serverResp.statusCode), serverResp.reqURL) } - var ct string - if serverResp.header != nil { - ct = serverResp.header.Get("Content-Type") - } - - var errorMessage string - if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var daemonErr error + if serverResp.header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) { var errorResponse types.ErrorResponse if err := json.Unmarshal(body, &errorResponse); err != nil { return errors.Wrap(err, "Error reading JSON") } - errorMessage = strings.TrimSpace(errorResponse.Message) + daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) } else { - errorMessage = strings.TrimSpace(string(body)) + daemonErr = errors.New(strings.TrimSpace(string(body))) } - - return errors.Wrap(errors.New(errorMessage), "Error response from daemon") + return errors.Wrap(daemonErr, "Error response from daemon") } -func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { +func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request { // Add CLI Config's HTTP Headers BEFORE we set the Docker headers // then the user can't change OUR headers for k, v := range cli.customHTTPHeaders { @@ -253,6 +252,14 @@ func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request for k, v := range headers { req.Header[http.CanonicalHeaderKey(k)] = v } + + if cli.userAgent != nil { + if *cli.userAgent == "" { + req.Header.Del("User-Agent") + } else { + req.Header.Set("User-Agent", *cli.userAgent) + } + } return req } diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index c65d38a19..7b7f1ba74 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -11,7 +11,7 @@ import ( // SecretCreate creates a new secret. func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { var response types.SecretCreateResponse - if err := cli.NewVersionError("1.25", "secret create"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret create"); err != nil { return response, err } resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index 5906874b1..a9cb59889 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -11,7 +11,7 @@ import ( // SecretInspectWithRaw returns the secret information with raw data func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret inspect"); err != nil { return swarm.Secret{}, nil, err } if id == "" { diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go index a0289c9f4..4d21639ef 100644 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -12,7 +12,7 @@ import ( // SecretList returns the list of secrets. 
func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if err := cli.NewVersionError("1.25", "secret list"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret list"); err != nil { return nil, err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index f47f68b6e..079ed6739 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -4,7 +4,7 @@ import "context" // SecretRemove removes a secret. func (cli *Client) SecretRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret remove"); err != nil { return err } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index 2e939e8ce..9dfe67198 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -9,7 +9,7 @@ import ( // SecretUpdate attempts to update a secret. func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { - if err := cli.NewVersionError("1.25", "secret update"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "secret update"); err != nil { return err } query := url.Values{} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index b6065b8ee..2ebb5ee3a 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -4,26 +4,28 @@ import ( "context" "encoding/json" "fmt" + "net/http" "strings" - "github.com/docker/distribution/reference" + "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) // ServiceCreate creates a new service. -func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var response types.ServiceCreateResponse - headers := map[string][]string{ - "version": {cli.version}, - } +func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) { + var response swarm.ServiceCreateResponse - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
+ cli.checkVersion(ctx) // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { @@ -53,6 +55,16 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, } } + headers := http.Header{} + if versions.LessThan(cli.version, "1.30") { + // the custom "version" header was used by engine API before 20.10 + // (API 1.30) to switch between client- and server-side lookup of + // image digests. + headers["version"] = []string{cli.version} + } + if options.EncodedRegistryAuth != "" { + headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} + } resp, err := cli.post(ctx, "/services/create", nil, service, headers) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go index 906fd4059..e9e30a2ab 100644 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -6,14 +6,14 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" timetypes "github.com/docker/docker/api/types/time" "github.com/pkg/errors" ) // ServiceLogs returns the logs generated by a service in an io.ReadCloser. // It's up to the caller to close the stream. -func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index ff8cded8b..e05eebf56 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -3,30 +3,31 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "net/http" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" ) // ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. // It should be the value as set *before* the update. You can find this value in the Meta field // of swarm.Service, which can be found using ServiceInspectWithRaw. -func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. 
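Both ServiceCreate and ServiceUpdate now negotiate eagerly because the legacy "version" header is only sent for API versions below 1.30; the comparison they rely on can be sanity-checked in isolation with sample version strings:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	// The same comparison ServiceCreate/ServiceUpdate use to decide whether
	// to send the legacy "version" header (only for API < 1.30).
	for _, v := range []string{"1.24", "1.30", "1.44"} {
		fmt.Printf("API %s -> legacy header: %v\n", v, versions.LessThan(v, "1.30"))
	}
}
```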
+ cli.checkVersion(ctx) + var ( query = url.Values{} - response = types.ServiceUpdateResponse{} + response = swarm.ServiceUpdateResponse{} ) - headers := map[string][]string{ - "version": {cli.version}, - } - - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } - if options.RegistryAuthFrom != "" { query.Set("registryAuthFrom", options.RegistryAuthFrom) } @@ -60,6 +61,16 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version } } + headers := http.Header{} + if versions.LessThan(cli.version, "1.30") { + // the custom "version" header was used by engine API before 20.10 + // (API 1.30) to switch between client- and server-side lookup of + // image digests. + headers["version"] = []string{cli.version} + } + if options.EncodedRegistryAuth != "" { + headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} + } resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go index 6222fab57..b8c20e71d 100644 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -6,13 +6,13 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" timetypes "github.com/docker/docker/api/types/time" ) // TaskLogs returns the logs generated by a task in an io.ReadCloser. // It's up to the caller to close the stream. -func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *Client) TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go deleted file mode 100644 index 554134436..000000000 --- a/vendor/github.com/docker/docker/client/transport.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "crypto/tls" - "net/http" -) - -// resolveTLSConfig attempts to resolve the TLS configuration from the -// RoundTripper. 
-func resolveTLSConfig(transport http.RoundTripper) *tls.Config { - switch tr := transport.(type) { - case *http.Transport: - return tr.TLSClientConfig - default: - return nil - } -} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index 6e324708f..9333f6ee7 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -13,7 +13,7 @@ import ( func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { var report types.VolumesPruneReport - if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + if err := cli.NewVersionError(ctx, "1.25", "volume prune"); err != nil { return report, err } diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index 1f2643836..31e08cb97 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -10,8 +10,14 @@ import ( // VolumeRemove removes a volume from the docker host. func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { query := url.Values{} - if versions.GreaterThanOrEqualTo(cli.version, "1.25") { - if force { + if force { + // Make sure we negotiated (if the client is configured to do so), + // as code below contains API-version specific handling of options. + // + // Normally, version-negotiation (if enabled) would not happen until + // the API request is made. + cli.checkVersion(ctx) + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { query.Set("force", "1") } } diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go index 33bd31e53..151863f07 100644 --- a/vendor/github.com/docker/docker/client/volume_update.go +++ b/vendor/github.com/docker/docker/client/volume_update.go @@ -11,7 +11,7 @@ import ( // VolumeUpdate updates a volume. This only works for Cluster Volumes, and // only some fields can be updated. 
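The context-aware NewVersionError keeps these pre-flight gates cheap: with a pinned client version the error is produced client-side, without touching the daemon. A minimal offline sketch against the volume prune gate above (the pinned version is illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	// Pinning an API version below 1.25 trips the "volume prune" gate
	// client-side; no daemon connection is needed for this error path.
	cli, err := client.NewClientWithOpts(client.WithVersion("1.24"))
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	if _, err := cli.VolumesPrune(context.Background(), filters.NewArgs()); err != nil {
		fmt.Println("expected version gate:", err)
	}
}
```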
func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { - if err := cli.NewVersionError("1.42", "volume update"); err != nil { + if err := cli.NewVersionError(ctx, "1.42", "volume update"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go index 61e7456b4..a5523c3e9 100644 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs // ErrNotFound signals that the requested object doesn't exist type ErrNotFound interface { diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go index fe06fb6f7..042de4b7b 100644 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs import "context" diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index 77bda389d..ebcd78930 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -1,4 +1,4 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs import ( "net/http" diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go index 3abf07d0c..f94034cbd 100644 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -1,9 +1,18 @@ -package errdefs // import "github.com/docker/docker/errdefs" +package errdefs + +import ( + "context" + "errors" +) type causer interface { Cause() error } +type wrapErr interface { + Unwrap() error +} + func getImplementer(err error) error { switch e := err.(type) { case @@ -23,6 +32,8 @@ func getImplementer(err error) error { return err case causer: return getImplementer(e.Cause()) + case wrapErr: + return getImplementer(e.Unwrap()) default: return err } @@ -105,3 +116,8 @@ func IsDataLoss(err error) bool { _, ok := getImplementer(err).(ErrDataLoss) return ok } + +// IsContext returns if the passed in error is due to context cancellation or deadline exceeded. +func IsContext(err error) bool { + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go b/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go new file mode 100644 index 000000000..167261763 --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go @@ -0,0 +1,54 @@ +package v1 + +import ( + "time" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +const DockerOCIImageMediaType = "application/vnd.docker.container.image.v1+json" + +// DockerOCIImage is a ocispec.Image extended with Docker specific Config. +type DockerOCIImage struct { + ocispec.Image + + // Shadow ocispec.Image.Config + Config DockerOCIImageConfig `json:"config,omitempty"` +} + +// DockerOCIImageConfig is a ocispec.ImageConfig extended with Docker specific fields. +type DockerOCIImageConfig struct { + ocispec.ImageConfig + + DockerOCIImageConfigExt +} + +// DockerOCIImageConfigExt contains Docker-specific fields in DockerImageConfig. 
+type DockerOCIImageConfigExt struct { + Healthcheck *HealthcheckConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + + OnBuild []string `json:",omitempty"` // ONBUILD metadata that were defined on the image Dockerfile + Shell []string `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// HealthcheckConfig holds configuration settings for the HEALTHCHECK feature. +type HealthcheckConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + StartInterval time.Duration `json:",omitempty"` // The interval to attempt healthchecks at during the start period + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go new file mode 100644 index 000000000..cf4d6a595 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/multierror/multierror.go @@ -0,0 +1,46 @@ +package multierror + +import ( + "strings" +) + +// Join is a drop-in replacement for errors.Join with better formatting. 
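Since internal/multierror cannot be imported from outside docker/docker, here is a small local mirror of the formatting implemented below, for reference: single errors pass through trimmed, multiple errors become "* " bullets with nested lines indented.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// join mirrors the formatting of the internal Join shown below.
func join(errs ...error) string {
	parts := make([]string, 0, len(errs))
	for _, err := range errs {
		if err != nil {
			parts = append(parts, strings.ReplaceAll(err.Error(), "\n", "\n\t"))
		}
	}
	if len(parts) == 0 {
		return ""
	}
	if len(parts) == 1 {
		return strings.TrimSpace(parts[0])
	}
	return "* " + strings.Join(parts, "\n* ")
}

func main() {
	fmt.Println(join(errors.New("first failure"), errors.New("second failure\nwith detail")))
}
```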
+func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + if len(e.errs) == 1 { + return strings.TrimSpace(e.errs[0].Error()) + } + stringErrs := make([]string, 0, len(e.errs)) + for _, subErr := range e.errs { + stringErrs = append(stringErrs, strings.Replace(subErr.Error(), "\n", "\n\t", -1)) + } + return "* " + strings.Join(stringErrs, "\n* ") +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 34361a24a..43133a095 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -21,6 +21,7 @@ import ( "time" "github.com/containerd/containerd/pkg/userns" + "github.com/containerd/log" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" @@ -29,7 +30,6 @@ import ( "github.com/moby/patternmatcher" "github.com/moby/sys/sequential" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a @@ -42,7 +42,7 @@ import ( // This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is // subject to change in Moby at any time -- image authors who require consistent or known directory permissions // should explicitly control them by ensuring that header entries exist for any applicable path. -const ImpliedDirectoryMode = 0755 +const ImpliedDirectoryMode = 0o755 type ( // Compression is the state represents if compressed or not. @@ -70,6 +70,12 @@ type ( // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool + // Allow unpacking to succeed in spite of failures to set extended + // attributes on the unpacked files due to the destination filesystem + // not supporting them or a lack of permissions. Extended attributes + // were probably in the archive for a reason, so set this option at + // your own peril. 
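A hedged usage sketch for the BestEffortXattrs option documented above and declared just below; the tar path and destination are placeholders, and unpacking may still require elevated privileges for ownership and device nodes:

```go
package main

import (
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// "layer.tar" and the destination directory are placeholder values.
	f, err := os.Open("layer.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := archive.Untar(f, "/tmp/unpacked", &archive.TarOptions{
		BestEffortXattrs: true, // tolerate xattr failures (e.g. unsupported FS)
	}); err != nil {
		panic(err)
	}
}
```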
+ BestEffortXattrs bool } ) @@ -199,21 +205,21 @@ func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { noPigz, err := strconv.ParseBool(noPigzEnv) if err != nil { - logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") + log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") } if noPigz { - logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) + log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) return gzip.NewReader(buf) } } unpigzPath, err := exec.LookPath("unpigz") if err != nil { - logrus.Debugf("unpigz binary not found, falling back to go gzip library") + log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library") return gzip.NewReader(buf) } - logrus.Debugf("Using %s to decompress", unpigzPath) + log.G(ctx).Debugf("Using %s to decompress", unpigzPath) return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) } @@ -475,6 +481,8 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro return hdr, nil } +const paxSchilyXattr = "SCHILY.xattr." + // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { @@ -487,15 +495,16 @@ func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { ) capability, _ := system.Lgetxattr(path, "security.capability") if capability != nil { - length := len(capability) if capability[versionOffset] == vfsCapRevision3 { // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no // sense outside the user namespace the archive is built in. capability[versionOffset] = vfsCapRevision2 - length = xattrCapsSz2 + capability = capability[:xattrCapsSz2] + } + if hdr.PAXRecords == nil { + hdr.PAXRecords = make(map[string]string) } - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability[:length]) + hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability) } return nil } @@ -666,7 +675,19 @@ func (ta *tarAppender) addTarFile(path, name string) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, opts *TarOptions) error { + var ( + Lchown = true + inUserns, bestEffortXattrs bool + chownOpts *idtools.Identity + ) + if opts != nil { + Lchown = !opts.NoLchown + inUserns = opts.InUserNS + chownOpts = opts.ChownOpts + bestEffortXattrs = opts.BestEffortXattrs + } + // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) @@ -736,7 +757,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") + log.G(context.TODO()).Debug("PAX Global Extended Headers found and ignored") return nil default: @@ -757,26 +778,26 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } } - var errors []string - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP || err == syscall.EPERM { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. + var xattrErrs []string + for key, value := range hdr.PAXRecords { + xattr, ok := strings.CutPrefix(key, paxSchilyXattr) + if !ok { + continue + } + if err := system.Lsetxattr(path, xattr, []byte(value), 0); err != nil { + if bestEffortXattrs && errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.EPERM) { // EPERM occurs if modifying xattrs is not allowed. This can // happen when running in userns with restrictions (ChromeOS). - errors = append(errors, err.Error()) + xattrErrs = append(xattrErrs, err.Error()) continue } return err } } - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, + if len(xattrErrs) > 0 { + log.G(context.TODO()).WithFields(log.Fields{ + "errors": xattrErrs, }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } @@ -893,13 +914,13 @@ func (t *Tarballer) Do() { defer func() { // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) + log.G(context.TODO()).Errorf("Can't close tar writer: %s", err) } if err := t.compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) + log.G(context.TODO()).Errorf("Can't close compress writer: %s", err) } if err := t.pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) + log.G(context.TODO()).Errorf("Can't close pipe writer: %s", err) } }() @@ -922,7 +943,7 @@ func (t *Tarballer) Do() { // directory. So, we must split the source path and use the // basename as the include. 
if len(t.options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") + log.G(context.TODO()).Warn("Tar: Can't archive a file with includes") } dir, base := SplitPathDirEntry(t.srcPath) @@ -947,7 +968,7 @@ func (t *Tarballer) Do() { walkRoot := getWalkRoot(t.srcPath, include) filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error { if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) + log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) return nil } @@ -986,7 +1007,7 @@ func (t *Tarballer) Do() { skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) } if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) + log.G(context.TODO()).Errorf("Error matching %s: %v", relFilePath, err) return err } @@ -1047,7 +1068,7 @@ func (t *Tarballer) Do() { } if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err) // if pipe is broken, stop writing tar stream to it if err == io.ErrClosedPipe { return err @@ -1084,7 +1105,7 @@ loop: // ignore XGlobalHeader early to avoid creating parent directories for them if hdr.Typeflag == tar.TypeXGlobalHeader { - logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) + log.G(context.TODO()).Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) continue } @@ -1158,7 +1179,7 @@ loop: } } - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + if err := createTarFile(path, dest, hdr, trBuf, options); err != nil { return err } @@ -1297,7 +1318,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { // as owner rootIDs := archiver.IDMapping.RootPair() // Create dst, copy src's content into it - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil { return err } return archiver.TarUntar(src, dst) @@ -1322,7 +1343,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + if err := system.MkdirAll(filepath.Dir(dst), 0o700); err != nil { return err } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 76321a35e..2c3786cd5 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -21,8 +21,7 @@ func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConv return nil, nil } -type overlayWhiteoutConverter struct { -} +type overlayWhiteoutConverter struct{} func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { // convert whiteouts to AUFS format @@ -30,7 +29,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os // we just rename the file and make it normal dir, filename := filepath.Split(hdr.Name) hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 + hdr.Mode = 0o600 hdr.Typeflag = tar.TypeReg hdr.Size = 0 } @@ -42,9 +41,7 @@ func (overlayWhiteoutConverter) 
ConvertWrite(hdr *tar.Header, path string, fi os return nil, err } if len(opaque) == 1 && opaque[0] == 'y' { - if hdr.Xattrs != nil { - delete(hdr.Xattrs, "trusted.overlay.opaque") - } + delete(hdr.PAXRecords, paxSchilyXattr+"trusted.overlay.opaque") // create a header for the whiteout file // it should inherit some properties from the parent, but be a regular file diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go index 28ae2769c..3de1d64c8 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index 92d8e23dd..ff59d0197 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive // import "github.com/docker/docker/pkg/archive" @@ -8,6 +7,7 @@ import ( "errors" "os" "path/filepath" + "runtime" "strings" "syscall" @@ -44,6 +44,20 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { // statUnix populates hdr from system-dependent fields of fi without performing // any OS lookups. func statUnix(fi os.FileInfo, hdr *tar.Header) error { + // Devmajor and Devminor are only needed for special devices. + + // In FreeBSD, RDev for regular files is -1 (unless overridden by FS): + // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531 + // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241). + + // ZFS in particular does not override the default: + // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027 + + // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1). + // Such large values cannot be encoded in a tar header. + if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar { + return nil + } s, ok := fi.Sys().(*syscall.Stat_t) if !ok { return nil @@ -83,7 +97,7 @@ func getFileUIDGID(stat interface{}) (idtools.Identity, error) { // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) + mode := uint32(hdr.Mode & 0o7777) switch hdr.Typeflag { case tar.TypeBlock: mode |= unix.S_IFBLK diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index 7f7242be5..f9f16c925 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -3,6 +3,7 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" + "context" "fmt" "io" "os" @@ -12,10 +13,10 @@ import ( "syscall" "time" + "github.com/containerd/log" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // ChangeType represents the change type. 
@@ -107,8 +108,10 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { return "", nil } -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) +type ( + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) +) func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { var ( @@ -341,9 +344,7 @@ func newRootFileInfo() *FileInfo { // ChangesDirs compares two directories and generates an array of Change objects describing the changes. // If oldDir is "", then all files in newDir will be Add-Changes. func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) + var oldRoot, newRoot *FileInfo if oldDir == "" { emptyDir, err := os.MkdirTemp("", "empty") if err != nil { @@ -371,7 +372,7 @@ func ChangesSize(newDir string, changes []Change) int64 { file := filepath.Join(newDir, change.Path) fileInfo, err := os.Lstat(file) if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) + log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err) continue } @@ -420,22 +421,22 @@ func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) ChangeTime: timestamp, } if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) + log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) + log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) + log.G(context.TODO()).Debugf("Can't close layer: %s", err) } if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) + log.G(context.TODO()).Debugf("failed close Changes writer: %s", err) } }() return reader, nil diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go index f8792b3d4..81fcbc5ba 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -267,7 +267,7 @@ func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) continue } bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) + name := string(bytes[0:clen(bytes[:])]) if name == "." || name == ".." 
{ // Useless names continue } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go index 833798bd1..13a7d3c0c 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go index 54aace970..853c73ee8 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index 0ea159627..01eadc30d 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -2,14 +2,15 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" + "context" "errors" "io" "os" "path/filepath" "strings" + "github.com/containerd/log" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // Errors used or returned by this file. @@ -107,7 +108,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er sourceDir, sourceBase := SplitPathDirEntry(sourcePath) opts := TarResourceRebaseOpts(sourceBase, rebaseName) - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir) return TarWithOptions(sourceDir, opts) } diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go index 2ac7729f4..065bd4add 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index 1a2fb971f..318f59421 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -2,6 +2,7 @@ package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" + "context" "fmt" "io" "os" @@ -9,9 +10,9 @@ import ( "runtime" "strings" + "github.com/containerd/log" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" ) // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be @@ -67,7 +68,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, // image but have it tagged as Windows inadvertently. 
if runtime.GOOS == "windows" { if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) continue } } @@ -92,7 +93,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } defer os.RemoveAll(aufsTempdir) } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options); err != nil { return 0, err } } @@ -183,7 +184,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, return 0, err } - if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil { + if err := createTarFile(path, dest, srcHdr, srcData, options); err != nil { return 0, err } diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go index d7f806445..7216f2f4f 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive diff --git a/vendor/github.com/docker/docker/pkg/archive/path_unix.go b/vendor/github.com/docker/docker/pkg/archive/path_unix.go index 0b135aea7..390264bf8 100644 --- a/vendor/github.com/docker/docker/pkg/archive/path_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/path_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package archive diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go index d08779686..14c4ceb1d 100644 --- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package archive // import "github.com/docker/docker/pkg/archive" diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 000000000..590683206 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir.go @@ -0,0 +1,44 @@ +package homedir + +import ( + "os" + "os/user" + "runtime" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on. +// +// Deprecated: this function is no longer used, and will be removed in the next release. +func Key() string { + return envKeyName +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// On non-Windows platforms, it falls back to nss lookups, if the home +// directory cannot be obtained from environment-variables. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + home, _ := os.UserHomeDir() + if home == "" && runtime.GOOS != "windows" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
+// +// Deprecated: this function is no longer used, and will be removed in the next release. +func GetShortcutString() string { + return homeShortCut +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index 11f1bec98..4eeb26b5d 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package homedir // import "github.com/docker/docker/pkg/homedir" diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go index d1732dee5..feae4d736 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -1,39 +1,8 @@ //go:build !windows -// +build !windows package homedir // import "github.com/docker/docker/pkg/homedir" -import ( - "os" - "os/user" +const ( + envKeyName = "HOME" + homeShortCut = "~" ) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. -func Get() string { - home := os.Getenv(Key()) - if home == "" { - if u, err := user.Current(); err == nil { - return u.HomeDir - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "~" -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go index 2f81813b2..37f4ee670 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -1,24 +1,6 @@ package homedir // import "github.com/docker/docker/pkg/homedir" -import ( - "os" +const ( + envKeyName = "USERPROFILE" + homeShortCut = "%USERPROFILE%" // be careful while using in format functions ) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. 
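[Editor's note] With the refactor above, both platform files shrink to a pair of constants and the real logic lives in the new homedir.go: Get() tries os.UserHomeDir first and, on non-Windows platforms, falls back to an nss lookup through user.Current() only when the environment variable is unset. A minimal usage sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	// Resolves $HOME (or %USERPROFILE% on Windows) via os.UserHomeDir;
	// on Unix an unset variable triggers a user-database lookup instead,
	// e.g. inside a stripped-down container environment.
	fmt.Println("home:", homedir.Get())
}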
-func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index a4001c3b8..cd621bdcc 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" @@ -11,15 +10,9 @@ import ( "os/exec" "path/filepath" "strconv" - "sync" "syscall" - "github.com/opencontainers/runc/libcontainer/user" -) - -var ( - entOnce sync.Once - getentCmd string + "github.com/moby/sys/user" ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { @@ -162,10 +155,10 @@ func getentGroup(name string) (user.Group, error) { } func callGetent(database, key string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("unable to find getent command") + getentCmd, err := resolveBinary("getent") + // if no `getent` command within the execution environment, can't do anything else + if err != nil { + return nil, fmt.Errorf("unable to find getent command: %w", err) } command := exec.Command(getentCmd, database, key) // we run getent within container filesystem, but without /dev so /dev/null is not available for exec to mock stdin diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go index 5e24577e2..6a9311c4a 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package idtools // import "github.com/docker/docker/pkg/idtools" diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go index 05cc69636..517a2f52c 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index de00b95e3..e03d3fee7 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -3,11 +3,15 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "context" "io" + "runtime/debug" + "sync/atomic" // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. 
_ "crypto/sha256" _ "crypto/sha512" + + "github.com/containerd/log" ) // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser @@ -16,10 +20,15 @@ import ( type ReadCloserWrapper struct { io.Reader closer func() error + closed atomic.Bool } // Close calls back the passed closer function func (r *ReadCloserWrapper) Close() error { + if !r.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("ReadCloserWrapper") + return nil + } return r.closer() } @@ -87,6 +96,7 @@ type cancelReadCloser struct { cancel func() pR *io.PipeReader // Stream to read from pW *io.PipeWriter + closed atomic.Bool } // NewCancelReadCloser creates a wrapper that closes the ReadCloser when the @@ -146,6 +156,17 @@ func (p *cancelReadCloser) closeWithError(err error) { // Close closes the wrapper its underlying reader. It will cause // future calls to Read to return io.EOF. func (p *cancelReadCloser) Close() error { + if !p.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("cancelReadCloser") + return nil + } p.closeWithError(io.EOF) return nil } + +func subsequentCloseWarn(name string) { + log.G(context.TODO()).Error("subsequent attempt to close " + name) + if log.GetLevel() >= log.DebugLevel { + log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack())) + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go b/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go deleted file mode 100644 index b3321602c..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/tempdir_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package ioutils - -import "github.com/docker/docker/pkg/longpath" - -// TempDir is the equivalent of [os.MkdirTemp], except that on Windows -// the result is in Windows longpath format. On Unix systems it is -// equivalent to [os.MkdirTemp]. -// -// Deprecated: use [longpath.MkdirTemp]. -var TempDir = longpath.MkdirTemp diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go index 61c679497..1f50602f2 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -1,6 +1,9 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils" -import "io" +import ( + "io" + "sync/atomic" +) // NopWriter represents a type which write operation is nop. type NopWriter struct{} @@ -29,9 +32,14 @@ func (f *NopFlusher) Flush() {} type writeCloserWrapper struct { io.Writer closer func() error + closed atomic.Bool } func (r *writeCloserWrapper) Close() error { + if !r.closed.CompareAndSwap(false, true) { + subsequentCloseWarn("WriteCloserWrapper") + return nil + } return r.closer() } diff --git a/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go index ebfadd534..3d03441c1 100644 --- a/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/meminfo/meminfo_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !windows -// +build !linux,!windows package meminfo diff --git a/vendor/github.com/docker/docker/pkg/process/doc.go b/vendor/github.com/docker/docker/pkg/process/doc.go deleted file mode 100644 index dae536d7d..000000000 --- a/vendor/github.com/docker/docker/pkg/process/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package process provides a set of basic functions to manage individual -// processes. 
-package process diff --git a/vendor/github.com/docker/docker/pkg/process/process_unix.go b/vendor/github.com/docker/docker/pkg/process/process_unix.go deleted file mode 100644 index daf392362..000000000 --- a/vendor/github.com/docker/docker/pkg/process/process_unix.go +++ /dev/null @@ -1,82 +0,0 @@ -//go:build !windows -// +build !windows - -package process - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "runtime" - "strconv" - - "golang.org/x/sys/unix" -) - -// Alive returns true if process with a given pid is running. It only considers -// positive PIDs; 0 (all processes in the current process group), -1 (all processes -// with a PID larger than 1), and negative (-n, all processes in process group -// "n") values for pid are never considered to be alive. -func Alive(pid int) bool { - if pid < 1 { - return false - } - switch runtime.GOOS { - case "darwin": - // OS X does not have a proc filesystem. Use kill -0 pid to judge if the - // process exists. From KILL(2): https://www.freebsd.org/cgi/man.cgi?query=kill&sektion=2&manpath=OpenDarwin+7.2.1 - // - // Sig may be one of the signals specified in sigaction(2) or it may - // be 0, in which case error checking is performed but no signal is - // actually sent. This can be used to check the validity of pid. - err := unix.Kill(pid, 0) - - // Either the PID was found (no error) or we get an EPERM, which means - // the PID exists, but we don't have permissions to signal it. - return err == nil || err == unix.EPERM - default: - _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))) - return err == nil - } -} - -// Kill force-stops a process. It only considers positive PIDs; 0 (all processes -// in the current process group), -1 (all processes with a PID larger than 1), -// and negative (-n, all processes in process group "n") values for pid are -// ignored. Refer to [KILL(2)] for details. -// -// [KILL(2)]: https://man7.org/linux/man-pages/man2/kill.2.html -func Kill(pid int) error { - if pid < 1 { - return fmt.Errorf("invalid PID (%d): only positive PIDs are allowed", pid) - } - err := unix.Kill(pid, unix.SIGKILL) - if err != nil && err != unix.ESRCH { - return err - } - return nil -} - -// Zombie return true if process has a state with "Z". It only considers positive -// PIDs; 0 (all processes in the current process group), -1 (all processes with -// a PID larger than 1), and negative (-n, all processes in process group "n") -// values for pid are ignored. Refer to [PROC(5)] for details. -// -// [PROC(5)]: https://man7.org/linux/man-pages/man5/proc.5.html -func Zombie(pid int) (bool, error) { - if pid < 1 { - return false, nil - } - data, err := os.ReadFile(fmt.Sprintf("/proc/%d/stat", pid)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - if cols := bytes.SplitN(data, []byte(" "), 4); len(cols) >= 3 && string(cols[2]) == "Z" { - return true, nil - } - return false, nil -} diff --git a/vendor/github.com/docker/docker/pkg/process/process_windows.go b/vendor/github.com/docker/docker/pkg/process/process_windows.go deleted file mode 100644 index 26158d09e..000000000 --- a/vendor/github.com/docker/docker/pkg/process/process_windows.go +++ /dev/null @@ -1,52 +0,0 @@ -package process - -import ( - "os" - - "golang.org/x/sys/windows" -) - -// Alive returns true if process with a given pid is running. 
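[Editor's note] pkg/process drops out of the vendored tree entirely with this bump. Code that still needs a cheap liveness probe can use the kill(2) signal-0 idiom the deleted Unix implementation relied on for darwin; a sketch under that assumption (EPERM means the PID exists but we may not signal it):

//go:build unix

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// alive reports whether pid exists: signal 0 performs the permission
// and existence checks of kill(2) without delivering anything.
func alive(pid int) bool {
	if pid < 1 {
		return false
	}
	err := unix.Kill(pid, 0)
	return err == nil || err == unix.EPERM
}

func main() {
	fmt.Println(alive(1))
}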
-func Alive(pid int) bool { - h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid)) - if err != nil { - return false - } - var c uint32 - err = windows.GetExitCodeProcess(h, &c) - _ = windows.CloseHandle(h) - if err != nil { - // From the GetExitCodeProcess function (processthreadsapi.h) API docs: - // https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess - // - // The GetExitCodeProcess function returns a valid error code defined by the - // application only after the thread terminates. Therefore, an application should - // not use STILL_ACTIVE (259) as an error code (STILL_ACTIVE is a macro for - // STATUS_PENDING (minwinbase.h)). If a thread returns STILL_ACTIVE (259) as - // an error code, then applications that test for that value could interpret it - // to mean that the thread is still running, and continue to test for the - // completion of the thread after the thread has terminated, which could put - // the application into an infinite loop. - return c == uint32(windows.STATUS_PENDING) - } - return true -} - -// Kill force-stops a process. -func Kill(pid int) error { - p, err := os.FindProcess(pid) - if err == nil { - err = p.Kill() - if err != nil && err != os.ErrProcessDone { - return err - } - } - return nil -} - -// Zombie is not supported on Windows. -// -// TODO(thaJeztah): remove once we remove the stubs from pkg/system. -func Zombie(_ int) (bool, error) { - return false, nil -} diff --git a/vendor/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/docker/docker/pkg/progress/progress.go new file mode 100644 index 000000000..32300914e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progress.go @@ -0,0 +1,93 @@ +package progress // import "github.com/docker/docker/pkg/progress" + +import ( + "fmt" +) + +// Progress represents the progress of a transfer. +type Progress struct { + ID string + + // Progress contains a Message or... + Message string + + // ...progress of an action + Action string + Current int64 + Total int64 + + // If true, don't show xB/yB + HideCounts bool + // If not empty, use units instead of bytes for counts + Units string + + // Aux contains extra information not presented to the user, such as + // digests for push signing. + Aux interface{} + + LastUpdate bool +} + +// Output is an interface for writing progress information. It's +// like a writer for progress, but we don't call it Writer because +// that would be confusing next to ProgressReader (also, because it +// doesn't implement the io.Writer interface). +type Output interface { + WriteProgress(Progress) error +} + +type chanOutput chan<- Progress + +func (out chanOutput) WriteProgress(p Progress) error { + // FIXME: workaround for panic in #37735 + defer func() { + recover() + }() + out <- p + return nil +} + +// ChanOutput returns an Output that writes progress updates to the +// supplied channel. +func ChanOutput(progressChan chan<- Progress) Output { + return chanOutput(progressChan) +} + +type discardOutput struct{} + +func (discardOutput) WriteProgress(Progress) error { + return nil +} + +// DiscardOutput returns an Output that discards progress +func DiscardOutput() Output { + return discardOutput{} +} + +// Update is a convenience function to write a progress update to the channel. 
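[Editor's note] progress.Output is deliberately tiny: anything with a WriteProgress(Progress) error method is a valid sink, which is what keeps the channel-backed, discarding and JSON-stream implementations interchangeable. A toy sink driven through the Update helper defined just below:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/progress"
)

// printOutput is a toy progress.Output that prints every update.
type printOutput struct{}

func (printOutput) WriteProgress(p progress.Progress) error {
	fmt.Printf("%s: %s %d/%d\n", p.ID, p.Action, p.Current, p.Total)
	return nil
}

func main() {
	var out progress.Output = printOutput{}
	progress.Update(out, "layer-1", "Downloading")
}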
+func Update(out Output, id, action string) { + out.WriteProgress(Progress{ID: id, Action: action}) +} + +// Updatef is a convenience function to write a printf-formatted progress update +// to the channel. +func Updatef(out Output, id, format string, a ...interface{}) { + Update(out, id, fmt.Sprintf(format, a...)) +} + +// Message is a convenience function to write a progress message to the channel. +func Message(out Output, id, message string) { + out.WriteProgress(Progress{ID: id, Message: message}) +} + +// Messagef is a convenience function to write a printf-formatted progress +// message to the channel. +func Messagef(out Output, id, format string, a ...interface{}) { + Message(out, id, fmt.Sprintf(format, a...)) +} + +// Aux sends auxiliary information over a progress interface, which will not be +// formatted for the UI. This is used for things such as push signing. +func Aux(out Output, a interface{}) { + out.WriteProgress(Progress{Aux: a}) +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go new file mode 100644 index 000000000..07450a2d7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progressreader.go @@ -0,0 +1,66 @@ +package progress // import "github.com/docker/docker/pkg/progress" + +import ( + "io" + "time" + + "golang.org/x/time/rate" +) + +// Reader is a Reader with progress bar. +type Reader struct { + in io.ReadCloser // Stream to read from + out Output // Where to send progress bar to + size int64 + current int64 + lastUpdate int64 + id string + action string + rateLimiter *rate.Limiter +} + +// NewProgressReader creates a new ProgressReader. +func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { + return &Reader{ + in: in, + out: out, + size: size, + id: id, + action: action, + rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), + } +} + +func (p *Reader) Read(buf []byte) (n int, err error) { + read, err := p.in.Read(buf) + p.current += int64(read) + updateEvery := int64(1024 * 512) // 512kB + if p.size > 0 { + // Update progress for every 1% read if 1% < 512kB + if increment := int64(0.01 * float64(p.size)); increment < updateEvery { + updateEvery = increment + } + } + if p.current-p.lastUpdate > updateEvery || err != nil { + p.updateProgress(err != nil && read == 0) + p.lastUpdate = p.current + } + + return read, err +} + +// Close closes the progress reader and its underlying reader. +func (p *Reader) Close() error { + if p.current < p.size { + // print a full progress bar when closing prematurely + p.current = p.size + p.updateProgress(false) + } + return p.in.Close() +} + +func (p *Reader) updateProgress(last bool) { + if last || p.current == p.size || p.rateLimiter.Allow() { + p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) + } +} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go new file mode 100644 index 000000000..b0456e580 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go @@ -0,0 +1,159 @@ +// Package streamformatter provides helper functions to format a stream. 
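[Editor's note] Reader above throttles its reporting twice over: a rate.Limiter allows at most one update per 100ms, and updates are also skipped until 512kB (or 1% of the total, whichever is smaller) has been read since the last one. Wiring it to the terminal formatter from the streamformatter package that follows, as a usage sketch:

package main

import (
	"io"
	"os"
	"strings"

	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
)

func main() {
	src := io.NopCloser(strings.NewReader(strings.Repeat("x", 1<<20)))

	// NewProgressOutput renders raw "\r"-terminated progress lines;
	// Close prints a full bar if the stream ends early.
	out := streamformatter.NewProgressOutput(os.Stdout)
	pr := progress.NewProgressReader(src, out, 1<<20, "blob", "Copying")
	if _, err := io.Copy(io.Discard, pr); err != nil {
		panic(err)
	}
	_ = pr.Close()
}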
+package streamformatter // import "github.com/docker/docker/pkg/streamformatter" + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" +) + +const streamNewline = "\r\n" + +type jsonProgressFormatter struct{} + +func appendNewline(source []byte) []byte { + return append(source, []byte(streamNewline)...) +} + +// FormatStatus formats the specified objects according to the specified format (and id). +func FormatStatus(id, format string, a ...interface{}) []byte { + str := fmt.Sprintf(format, a...) + b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// FormatError formats the error as a JSON object +func FormatError(err error) []byte { + jsonError, ok := err.(*jsonmessage.JSONError) + if !ok { + jsonError = &jsonmessage.JSONError{Message: err.Error()} + } + if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return appendNewline(b) + } + return []byte(`{"error":"format error"}` + streamNewline) +} + +func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { + return FormatStatus(id, format, a...) +} + +// formatProgress formats the progress information for a specified action. +func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} + } + var auxJSON *json.RawMessage + if aux != nil { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return nil + } + auxJSON = new(json.RawMessage) + *auxJSON = auxJSONBytes + } + b, err := json.Marshal(&jsonmessage.JSONMessage{ + Status: action, + ProgressMessage: progress.String(), + Progress: progress, + ID: id, + Aux: auxJSON, + }) + if err != nil { + return nil + } + return appendNewline(b) +} + +type rawProgressFormatter struct{} + +func (sf *rawProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { + return []byte(fmt.Sprintf(format, a...) + streamNewline) +} + +func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} + } + endl := "\r" + if progress.String() == "" { + endl += "\n" + } + return []byte(action + " " + progress.String() + endl) +} + +// NewProgressOutput returns a progress.Output object that can be passed to +// progress.NewProgressReader. +func NewProgressOutput(out io.Writer) progress.Output { + return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true} +} + +// NewJSONProgressOutput returns a progress.Output that formats output +// using JSON objects +func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output { + return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines} +} + +type formatProgress interface { + formatStatus(id, format string, a ...interface{}) []byte + formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte +} + +type progressOutput struct { + sf formatProgress + out io.Writer + newLines bool +} + +// WriteProgress formats progress information from a ProgressReader. 
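[Editor's note] FormatStatus and FormatError above produce the JSON stream consumed by Docker-compatible clients: each message is one marshalled jsonmessage.JSONMessage terminated by the "\r\n" delimiter. For instance, this prints something like {"status":"Pulling layer 3/5","id":"layer-3"} followed by the delimiter:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/streamformatter"
)

func main() {
	b := streamformatter.FormatStatus("layer-3", "Pulling layer %d/%d", 3, 5)
	fmt.Printf("%s", b)
}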
+func (out *progressOutput) WriteProgress(prog progress.Progress) error { + var formatted []byte + if prog.Message != "" { + formatted = out.sf.formatStatus(prog.ID, prog.Message) + } else { + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units} + formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + } + _, err := out.out.Write(formatted) + if err != nil { + return err + } + + if out.newLines && prog.LastUpdate { + _, err = out.out.Write(out.sf.formatStatus("", "")) + return err + } + + return nil +} + +// AuxFormatter is a streamFormatter that writes aux progress messages +type AuxFormatter struct { + io.Writer +} + +// Emit emits the given interface as an aux progress message +func (sf *AuxFormatter) Emit(id string, aux interface{}) error { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return err + } + auxJSON := new(json.RawMessage) + *auxJSON = auxJSONBytes + msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Aux: auxJSON}) + if err != nil { + return err + } + msgJSON = appendNewline(msgJSON) + n, err := sf.Writer.Write(msgJSON) + if n != len(msgJSON) { + return io.ErrShortWrite + } + return err +} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go new file mode 100644 index 000000000..1473ed974 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go @@ -0,0 +1,47 @@ +package streamformatter // import "github.com/docker/docker/pkg/streamformatter" + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/pkg/jsonmessage" +) + +type streamWriter struct { + io.Writer + lineFormat func([]byte) string +} + +func (sw *streamWriter) Write(buf []byte) (int, error) { + formattedBuf := sw.format(buf) + n, err := sw.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +func (sw *streamWriter) format(buf []byte) []byte { + msg := &jsonmessage.JSONMessage{Stream: sw.lineFormat(buf)} + b, err := json.Marshal(msg) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// NewStdoutWriter returns a writer which formats the output as json message +// representing stdout lines +func NewStdoutWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return string(buf) + }} +} + +// NewStderrWriter returns a writer which formats the output as json message +// representing stderr lines +func NewStderrWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return "\033[91m" + string(buf) + "\033[0m" + }} +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go index 84ae15705..92ff02097 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go index 2573d7162..f4bbcce74 100644 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -1,13 +1,6 @@ package system // import 
"github.com/docker/docker/pkg/system" -import ( - "errors" -) +import "errors" -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") - - // ErrNotSupportedOperatingSystem means the operating system is not supported. - ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") -) +// ErrNotSupportedPlatform means the platform is not supported. +var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go index 380112940..f01f9385e 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/image_os.go b/vendor/github.com/docker/docker/pkg/system/image_os.go deleted file mode 100644 index e3de86be2..000000000 --- a/vendor/github.com/docker/docker/pkg/system/image_os.go +++ /dev/null @@ -1,10 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" -import ( - "runtime" - "strings" -) - -// IsOSSupported determines if an operating system is supported by the host. -func IsOSSupported(os string) bool { - return strings.EqualFold(runtime.GOOS, os) -} diff --git a/vendor/github.com/docker/docker/pkg/system/image_os_deprecated.go b/vendor/github.com/docker/docker/pkg/system/image_os_deprecated.go new file mode 100644 index 000000000..afb57dae6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/image_os_deprecated.go @@ -0,0 +1,19 @@ +package system + +import ( + "errors" + "runtime" + "strings" +) + +// ErrNotSupportedOperatingSystem means the operating system is not supported. +// +// Deprecated: use [github.com/docker/docker/image.CheckOS] and check the error returned. +var ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") + +// IsOSSupported determines if an operating system is supported by the host. +// +// Deprecated: use [github.com/docker/docker/image.CheckOS] and check the error returned. +func IsOSSupported(os string) bool { + return strings.EqualFold(runtime.GOOS, os) +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go index 3c2a43ddb..7603efbbd 100644 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -1,9 +1,7 @@ package system // import "github.com/docker/docker/pkg/system" -var ( - // containerdRuntimeSupported determines if containerd should be the runtime. - containerdRuntimeSupported = false -) +// containerdRuntimeSupported determines if containerd should be the runtime. +var containerdRuntimeSupported = false // InitContainerdRuntime sets whether to use containerd for runtime on Windows. 
func InitContainerdRuntime(cdPath string) { diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go index 654b9f2c9..5e29a6b3b 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_deprecated.go b/vendor/github.com/docker/docker/pkg/system/meminfo_deprecated.go deleted file mode 100644 index 216519923..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_deprecated.go +++ /dev/null @@ -1,16 +0,0 @@ -package system - -import "github.com/docker/docker/pkg/meminfo" - -// MemInfo contains memory statistics of the host system. -// -// Deprecated: use [meminfo.Memory]. -type MemInfo = meminfo.Memory - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -// -// Deprecated: use [meminfo.Read]. -func ReadMemInfo() (*meminfo.Memory, error) { - return meminfo.Read() -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go index d27152c0f..2a62237a4 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go index c890be116..e218e742d 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go @@ -1,5 +1,4 @@ //go:build freebsd -// +build freebsd package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go index 4586aad19..34df0b923 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go @@ -1,5 +1,4 @@ //go:build !freebsd && !windows -// +build !freebsd,!windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index ec89d7a15..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_deprecated.go b/vendor/github.com/docker/docker/pkg/system/path_deprecated.go deleted file mode 100644 index 5c95026c3..000000000 --- a/vendor/github.com/docker/docker/pkg/system/path_deprecated.go +++ /dev/null @@ -1,18 +0,0 @@ -package system - -const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// DefaultPathEnv is unix style list of directories to search for -// executables. 
Each directory is separated from the next by a colon -// ':' character . -// For Windows containers, an empty string is returned as the default -// path will be set by the container, and Docker has no context of what the -// default path should be. -// -// Deprecated: use oci.DefaultPathEnv -func DefaultPathEnv(os string) string { - if os == "windows" { - return "" - } - return defaultUnixPathEnv -} diff --git a/vendor/github.com/docker/docker/pkg/system/process_deprecated.go b/vendor/github.com/docker/docker/pkg/system/process_deprecated.go deleted file mode 100644 index 7b9f19acd..000000000 --- a/vendor/github.com/docker/docker/pkg/system/process_deprecated.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build linux || freebsd || darwin || windows -// +build linux freebsd darwin windows - -package system - -import "github.com/docker/docker/pkg/process" - -var ( - // IsProcessAlive returns true if process with a given pid is running. - // - // Deprecated: use [process.Alive]. - IsProcessAlive = process.Alive - - // IsProcessZombie return true if process has a state with "Z" - // - // Deprecated: use [process.Zombie]. - // - // TODO(thaJeztah): remove the Windows implementation in process once we remove this stub. - IsProcessZombie = process.Zombie -) - -// KillProcess force-stops a process. -// -// Deprecated: use [process.Kill]. -func KillProcess(pid int) { - _ = process.Kill(pid) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go index 8e61d820f..435b776ee 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || netbsd -// +build freebsd netbsd package system // import "github.com/docker/docker/pkg/system" @@ -7,10 +6,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go index c1c0ee9f3..e0b629df0 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil + mtim: s.Mtimespec, + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 3ac02393f..4309d42b9 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -4,13 +4,15 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: s.Mode, uid: s.Uid, gid: s.Gid, // the type is 32bit on mips rdev: uint64(s.Rdev), //nolint: unconvert - mtim: s.Mtim}, nil + mtim: s.Mtim, + }, nil } // FromStatT converts a syscall.Stat_t type to a system.Stat_t type diff --git 
a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go index 756b92d1e..851374e5d 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -4,10 +4,12 @@ import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, + return &StatT{ + size: s.Size, mode: uint32(s.Mode), uid: s.Uid, gid: s.Gid, rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil + mtim: s.Mtim, + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go index a45ffddf7..205e54677 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go index 0ff3af2fa..10876cd73 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -45,5 +45,6 @@ func fromStatT(fi *os.FileInfo) (*StatT, error) { return &StatT{ size: (*fi).Size(), mode: (*fi).Mode(), - mtim: (*fi).ModTime()}, nil + mtim: (*fi).ModTime(), + }, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go index 2768750a0..f3a079f88 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd -// +build linux freebsd package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go index bfed4af03..7c19d5915 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd -// +build !linux,!freebsd package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs.go b/vendor/github.com/docker/docker/pkg/system/xattrs.go new file mode 100644 index 000000000..b3f4e8a21 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs.go @@ -0,0 +1,18 @@ +package system // import "github.com/docker/docker/pkg/system" + +type XattrError struct { + Op string + Attr string + Path string + Err error +} + +func (e *XattrError) Error() string { return e.Op + " " + e.Attr + " " + e.Path + ": " + e.Err.Error() } + +func (e *XattrError) Unwrap() error { return e.Err } + +// Timeout reports whether this error represents a timeout. 
+func (e *XattrError) Timeout() bool { + t, ok := e.Err.(interface{ Timeout() bool }) + return ok && t.Timeout() +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go index 95b609fe7..facfbb312 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -1,11 +1,17 @@ package system // import "github.com/docker/docker/pkg/system" -import "golang.org/x/sys/unix" +import ( + "golang.org/x/sys/unix" +) // Lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. // It will returns a nil slice and nil error if the xattr is not set. func Lgetxattr(path string, attr string) ([]byte, error) { + sysErr := func(err error) ([]byte, error) { + return nil, &XattrError{Op: "lgetxattr", Attr: attr, Path: path, Err: err} + } + // Start with a 128 length byte array dest := make([]byte, 128) sz, errno := unix.Lgetxattr(path, attr, dest) @@ -14,7 +20,7 @@ func Lgetxattr(path string, attr string) ([]byte, error) { // Buffer too small, use zero-sized buffer to get the actual size sz, errno = unix.Lgetxattr(path, attr, []byte{}) if errno != nil { - return nil, errno + return sysErr(errno) } dest = make([]byte, sz) sz, errno = unix.Lgetxattr(path, attr, dest) @@ -24,7 +30,7 @@ func Lgetxattr(path string, attr string) ([]byte, error) { case errno == unix.ENODATA: return nil, nil case errno != nil: - return nil, errno + return sysErr(errno) } return dest[:sz], nil @@ -33,5 +39,9 @@ func Lgetxattr(path string, attr string) ([]byte, error) { // Lsetxattr sets the value of the extended attribute identified by attr // and associated with the given path in the file system. 
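[Editor's note] Wrapping the raw errno in *XattrError (which implements Unwrap) lets callers branch on a structured error with errors.As instead of parsing message strings, while errors.Is still reaches the underlying errno. A Linux-only sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

func main() {
	_, err := system.Lgetxattr("/no/such/path", "user.test")

	// The operation, attribute and path survive as fields.
	var xerr *system.XattrError
	if errors.As(err, &xerr) {
		fmt.Println(xerr.Op, xerr.Attr, xerr.Path)
	}
	// Unwrap exposes the raw errno, so errors.Is keeps working.
	if errors.Is(err, unix.ENOENT) {
		fmt.Println("path does not exist")
	}
}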
func Lsetxattr(path string, attr string, data []byte, flags int) error { - return unix.Lsetxattr(path, attr, data, flags) + err := unix.Lsetxattr(path, attr, data, flags) + if err != nil { + return &XattrError{Op: "lsetxattr", Attr: attr, Path: path, Err: err} + } + return nil } diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go index b165a5dbf..2a3698f12 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package system // import "github.com/docker/docker/pkg/system" diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index 296c96a63..4049d780c 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -177,27 +177,27 @@ func ParsePortSpec(rawPort string) ([]PortMapping, error) { // Strip [] from IPV6 addresses rawIP, _, err := net.SplitHostPort(ip + ":") if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", ip, err) + return nil, fmt.Errorf("invalid IP address %v: %w", ip, err) } ip = rawIP } if ip != "" && net.ParseIP(ip) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", ip) + return nil, fmt.Errorf("invalid IP address: %s", ip) } if containerPort == "" { - return nil, fmt.Errorf("No port specified: %s", rawPort) + return nil, fmt.Errorf("no port specified: %s", rawPort) } startPort, endPort, err := ParsePortRange(containerPort) if err != nil { - return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + return nil, fmt.Errorf("invalid containerPort: %s", containerPort) } var startHostPort, endHostPort uint64 = 0, 0 if len(hostPort) > 0 { startHostPort, endHostPort, err = ParsePortRange(hostPort) if err != nil { - return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + return nil, fmt.Errorf("invalid hostPort: %s", hostPort) } } @@ -206,12 +206,12 @@ func ParsePortSpec(rawPort string) ([]PortMapping, error) { // In this case, use the host port range as the dynamic // host port range to allocate into. if endPort != startPort { - return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + return nil, fmt.Errorf("invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) } } if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("Invalid proto: %s", proto) + return nil, fmt.Errorf("invalid proto: %s", proto) } ports := []PortMapping{} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go index 892adf8c6..e4b53e8a3 100644 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -6,34 +6,10 @@ import ( "strings" ) -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -// DEPRECATED: do not use, this function may be removed in a future version -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - // ParsePortRange parses and validates the specified string as a port-range (8000-9000) func ParsePortRange(ports string) (uint64, uint64, error) { if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") + return 0, 0, fmt.Errorf("empty string specified for ports") } if !strings.Contains(ports, "-") { start, err := strconv.ParseUint(ports, 10, 16) @@ -51,7 +27,7 @@ func ParsePortRange(ports string) (uint64, uint64, error) { return 0, 0, err } if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + return 0, 0, fmt.Errorf("invalid range specified for port: %s", ports) } return start, end, nil } diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go index 5b65c546a..78a34a980 100644 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -1,4 +1,4 @@ -// +build !windows +//go:build !windows package sockets @@ -15,7 +15,7 @@ const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) func configureUnixTransport(tr *http.Transport, proto, addr string) error { if len(addr) > maxUnixSocketPathSize { - return fmt.Errorf("Unix socket path %q is too long", addr) + return fmt.Errorf("unix socket path %q is too long", addr) } // No need for compression in local communications. tr.DisableCompression = true diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go index e7591e6ed..b9233521e 100644 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -1,9 +1,9 @@ -// +build !windows +//go:build !windows /* Package sockets is a simple unix domain socket wrapper. -Usage +# Usage For example: @@ -42,7 +42,7 @@ For example: if _, err := conn.Read(buf); err != nil { panic(err) } else if string(buf) != echoStr { - panic(fmt.Errorf("Msg may lost")) + panic(fmt.Errorf("msg may lost")) } } */ @@ -103,7 +103,7 @@ func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error // We don't use "defer" here, to reset the umask to its original value as soon // as possible. Ideally we'd be able to detect if WithChmod() was passed as // an option, and skip changing umask if default permissions are used. - origUmask := syscall.Umask(0777) + origUmask := syscall.Umask(0o777) l, err := net.Listen("unix", path) syscall.Umask(origUmask) if err != nil { @@ -122,5 +122,5 @@ func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error // NewUnixSocket creates a unix socket with the specified path and group. 
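[Editor's note] The go-connections changes above are mostly error-message hygiene (lower-case messages, %w wrapping) plus removal of the long-deprecated PartParser; parser behavior is unchanged. A quick sketch of where those messages surface:

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// Spec format: ip:hostPort:containerPort/proto. An invalid piece
	// now yields one of the lower-cased errors from this hunk, e.g.
	// "invalid IP address: ...".
	mappings, err := nat.ParsePortSpec("127.0.0.1:8080:80/tcp")
	if err != nil {
		panic(err)
	}
	for _, m := range mappings {
		fmt.Println(m.Port, "->", m.Binding.HostIP, m.Binding.HostPort)
	}
}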
func NewUnixSocket(path string, gid int) (net.Listener, error) { - return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0660)) + return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0o660)) } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go similarity index 95% rename from vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go rename to vendor/github.com/docker/go-connections/tlsconfig/certpool.go index 1ca0965e0..f84c624ba 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go @@ -1,5 +1,3 @@ -// +build go1.7 - package tlsconfig import ( diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go deleted file mode 100644 index 1ff81c333..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.7 - -package tlsconfig - -import ( - "crypto/x509" -) - -// SystemCertPool returns an new empty cert pool, -// accessing system cert pool is supported in go 1.7 -func SystemCertPool() (*x509.CertPool, error) { - return x509.NewCertPool(), nil -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 992968373..606c98a38 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -1,6 +1,7 @@ // Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. // // As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// // A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. // A Config may be reused; the tls package will also not modify it. package tlsconfig @@ -9,11 +10,9 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" + "errors" "fmt" - "io/ioutil" "os" - - "github.com/pkg/errors" ) // Options represents the information needed to create client and server TLS configurations. @@ -36,7 +35,12 @@ type Options struct { ExclusiveRootPools bool MinVersion uint16 // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted + // if the key is encrypted. + // + // Deprecated: Use of encrypted TLS private keys has been deprecated, and + // will be removed in a future release. Golang has deprecated support for + // legacy PEM encryption (as specified in RFC 1423), as it is insecure by + // design (see https://go-review.googlesource.com/c/go/+/264159). Passphrase string } @@ -99,7 +103,7 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return nil, fmt.Errorf("failed to read system certificates: %v", err) } } - pemData, err := ioutil.ReadFile(caFile) + pemData, err := os.ReadFile(caFile) if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } @@ -109,6 +113,15 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { return certPool, nil } +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. 
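+//
+// (A hedged illustration: Options{MinVersion: tls.VersionTLS12} passes the
+// isValidMinVersion check below, while a value missing from this map, for
+// example 0x0305, is rejected by adjustMinVersion.)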
+var allTLSVersions = map[uint16]struct{}{ + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, + tls.VersionTLS13: {}, +} + // isValidMinVersion checks that the input value is a valid tls minimum version func isValidMinVersion(version uint16) bool { _, ok := allTLSVersions[version] @@ -120,10 +133,10 @@ func isValidMinVersion(version uint16) bool { func adjustMinVersion(options Options, config *tls.Config) error { if options.MinVersion > 0 { if !isValidMinVersion(options.MinVersion) { - return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) + return fmt.Errorf("invalid minimum TLS version: %x", options.MinVersion) } if options.MinVersion < config.MinVersion { - return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) + return fmt.Errorf("requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) } config.MinVersion = options.MinVersion } @@ -132,9 +145,14 @@ func adjustMinVersion(options Options, config *tls.Config) error { } // IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when trying to decrypt a TLS private key +// password when trying to decrypt a TLS private key. +// +// Deprecated: Use of encrypted TLS private keys has been deprecated, and +// will be removed in a future release. Golang has deprecated support for +// legacy PEM encryption (as specified in RFC 1423), as it is insecure by +// design (see https://go-review.googlesource.com/c/go/+/264159). func IsErrEncryptedKey(err error) bool { - return errors.Cause(err) == x509.IncorrectPasswordError + return errors.Is(err, x509.IncorrectPasswordError) } // getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. @@ -151,7 +169,7 @@ func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) if err != nil { - return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") + return nil, fmt.Errorf("private key is encrypted, but could not decrypt it: %w", err) } keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) } @@ -167,26 +185,24 @@ func getCert(options Options) ([]tls.Certificate, error) { return nil, nil } - errMessage := "Could not load X509 key pair" - - cert, err := ioutil.ReadFile(options.CertFile) + cert, err := os.ReadFile(options.CertFile) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } - prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + prKeyBytes, err := os.ReadFile(options.KeyFile) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) if err != nil { - return nil, errors.Wrap(err, errMessage) + return nil, err } return []tls.Certificate{tlsCert}, nil @@ -206,7 +222,7 @@ func Client(options Options) (*tls.Config, error) { tlsCerts, err := getCert(options) if err != nil { - return nil, err + return nil, fmt.Errorf("could not load X509 key pair: %w", err) } tlsConfig.Certificates = tlsCerts @@ -224,9 +240,9 @@ func Server(options Options) (*tls.Config, error) { tlsCert, err := 
tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + return nil, fmt.Errorf("could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + return nil, fmt.Errorf("error reading X509 key pair - make sure the key is not encrypted (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) } tlsConfig.Certificates = []tls.Certificate{tlsCert} if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go index 6b4c6a7c0..a82f9fa52 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -1,7 +1,4 @@ -// +build go1.5 - // Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// package tlsconfig import ( diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go deleted file mode 100644 index ee22df47c..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go deleted file mode 100644 index d8215f8e7..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/versions_go113.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build go1.13 - -package tlsconfig - -import ( - "crypto/tls" -) - -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, - tls.VersionTLS13: {}, -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go b/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go deleted file mode 100644 index a5ba7f4a3..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/versions_other.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.13 - -package tlsconfig - -import ( - "crypto/tls" -) - -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. 
-var allTLSVersions = map[uint16]struct{}{ - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 000000000..e028b46a9 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile new file mode 100644 index 000000000..4e12afdd9 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/Makefile @@ -0,0 +1,10 @@ +.PHONY: ci generate clean + +ci: clean generate + go test -race -v ./... + +generate: + go generate . + +clean: + rm -rf *_generated*.go diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md new file mode 100644 index 000000000..cf6b42f3d --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/README.md @@ -0,0 +1,95 @@ +# httpsnoop + +Package httpsnoop provides an easy way to capture http related metrics (i.e. +response time, bytes written, and http status code) from your application's +http.Handlers. + +Doing this requires non-trivial wrapping of the http.ResponseWriter interface, +which is also exposed for users interested in a more low-level API. + +[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop) +[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml) + +## Usage Example + +```go +// myH is your app's http handler, perhaps a http.ServeMux or similar. +var myH http.Handler +// wrappedH wraps myH in order to log every request. +wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + m := httpsnoop.CaptureMetrics(myH, w, r) + log.Printf( + "%s %s (code=%d dt=%s written=%d)", + r.Method, + r.URL, + m.Code, + m.Duration, + m.Written, + ) +}) +http.ListenAndServe(":8080", wrappedH) +``` + +## Why this package exists + +Instrumenting an application's http.Handler is surprisingly difficult. + +However if you google for e.g. 
"capture ResponseWriter status code" you'll find +lots of advise and code examples that suggest it to be a fairly trivial +undertaking. Unfortunately everything I've seen so far has a high chance of +breaking your application. + +The main problem is that a `http.ResponseWriter` often implements additional +interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and +`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter` +in your own struct that also implements the `http.ResponseWriter` interface +will hide the additional interfaces mentioned above. This has a high change of +introducing subtle bugs into any non-trivial application. + +Another approach I've seen people take is to return a struct that implements +all of the interfaces above. However, that's also problematic, because it's +difficult to fake some of these interfaces behaviors when the underlying +`http.ResponseWriter` doesn't have an implementation. It's also dangerous, +because an application may choose to operate differently, merely because it +detects the presence of these additional interfaces. + +This package solves this problem by checking which additional interfaces a +`http.ResponseWriter` implements, returning a wrapped version implementing the +exact same set of interfaces. + +Additionally this package properly handles edge cases such as `WriteHeader` not +being called, or called more than once, as well as concurrent calls to +`http.ResponseWriter` methods, and even calls happening after the wrapped +`ServeHTTP` has already returned. + +Unfortunately this package is not perfect either. It's possible that it is +still missing some interfaces provided by the go core (let me know if you find +one), and it won't work for applications adding their own interfaces into the +mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying +`http.ResponseWriter` and type-assert the result to its other interfaces. + +However, hopefully the explanation above has sufficiently scared you of rolling +your own solution to this problem. httpsnoop may still break your application, +but at least it tries to avoid it as much as possible. + +Anyway, the real problem here is that smuggling additional interfaces inside +`http.ResponseWriter` is a problematic design choice, but it probably goes as +deep as the Go language specification itself. But that's okay, I still prefer +Go over the alternatives ;). + +## Performance + +``` +BenchmarkBaseline-8 20000 94912 ns/op +BenchmarkCaptureMetrics-8 20000 95461 ns/op +``` + +As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an +overhead of ~500 ns per http request on my machine. However, the margin of +error appears to be larger than that, therefor it should be reasonable to +assume that the overhead introduced by `CaptureMetrics` is absolutely +negligible. + +## License + +MIT diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go new file mode 100644 index 000000000..bec7b71b3 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go @@ -0,0 +1,86 @@ +package httpsnoop + +import ( + "io" + "net/http" + "time" +) + +// Metrics holds metrics captured from CaptureMetrics. +type Metrics struct { + // Code is the first http response code passed to the WriteHeader func of + // the ResponseWriter. If no such call is made, a default code of 200 is + // assumed instead. 
+	Code int
+	// Duration is the time it took to execute the handler.
+	Duration time.Duration
+	// Written is the number of bytes successfully written by the Write or
+	// ReadFrom function of the ResponseWriter. ResponseWriters may also write
+	// data to their underlying connection directly (e.g. headers), but those
+	// are not tracked. Therefore the number of Written bytes will usually match
+	// the size of the response body.
+	Written int64
+}
+
+// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
+// returns the metrics it captured from it.
+func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
+	return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
+		hnd.ServeHTTP(ww, r)
+	})
+}
+
+// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
+// resulting metrics. This is very similar to CaptureMetrics (which is just
+// sugar on top of this func), but is a more usable interface if your
+// application doesn't use the Go http.Handler interface.
+func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
+	m := Metrics{Code: http.StatusOK}
+	m.CaptureMetrics(w, fn)
+	return m
+}
+
+// CaptureMetrics wraps w and calls fn with the wrapped w and updates
+// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
+// but allows one to customize the starting Metrics object.
+func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) {
+	var (
+		start         = time.Now()
+		headerWritten bool
+		hooks         = Hooks{
+			WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc {
+				return func(code int) {
+					next(code)
+
+					if !(code >= 100 && code <= 199) && !headerWritten {
+						m.Code = code
+						headerWritten = true
+					}
+				}
+			},
+
+			Write: func(next WriteFunc) WriteFunc {
+				return func(p []byte) (int, error) {
+					n, err := next(p)
+
+					m.Written += int64(n)
+					headerWritten = true
+					return n, err
+				}
+			},
+
+			ReadFrom: func(next ReadFromFunc) ReadFromFunc {
+				return func(src io.Reader) (int64, error) {
+					n, err := next(src)
+
+					headerWritten = true
+					m.Written += n
+					return n, err
+				}
+			},
+		}
+	)
+
+	fn(Wrap(w, hooks))
+	m.Duration += time.Since(start)
+}
diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go
new file mode 100644
index 000000000..203c35b3c
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/docs.go
@@ -0,0 +1,10 @@
+// Package httpsnoop provides an easy way to capture http related metrics (i.e.
+// response time, bytes written, and http status code) from your application's
+// http.Handlers.
+//
+// Doing this requires non-trivial wrapping of the http.ResponseWriter
+// interface, which is also exposed for users interested in a more low-level
+// API.
+package httpsnoop
+
+//go:generate go run codegen/main.go
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
new file mode 100644
index 000000000..101cedde6
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
@@ -0,0 +1,436 @@
+// +build go1.8
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
+
+package httpsnoop
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"net/http"
+)
+
+// HeaderFunc is part of the http.ResponseWriter interface.
+type HeaderFunc func() http.Header
+
+// WriteHeaderFunc is part of the http.ResponseWriter interface.
+type WriteHeaderFunc func(code int)
+
+// WriteFunc is part of the http.ResponseWriter interface.
+type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
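+//
+// As a sketch (assuming w, r, and h are an http.ResponseWriter, a
+// *http.Request, and an http.Handler already in scope), a hook that counts
+// bytes written might look like:
+//
+//	var written int64
+//	ww := Wrap(w, Hooks{
+//		Write: func(next WriteFunc) WriteFunc {
+//			return func(p []byte) (int, error) {
+//				n, err := next(p)
+//				written += int64(n)
+//				return n, err
+//			}
+//		},
+//	})
+//	h.ServeHTTP(ww, r)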
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, 
rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts 
*http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 000000000..e0951df15 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
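+//
+// (Note: this pre-go1.8 variant has no http.Pusher support, so the Hooks
+// struct above omits a Push field; usage otherwise mirrors the go1.8+ file.)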
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md deleted file mode 100644 index 3305db0f6..000000000 --- a/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md +++ /dev/null @@ -1,10 +0,0 @@ -Serious about security -====================== - -Square recognizes the important contributions the security research community -can make. We therefore encourage reporting security issues with the code -contained in this repository. - -If you believe you have discovered a security vulnerability, please follow the -guidelines at . - diff --git a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md index 7820c2f4d..ce2a54ebf 100644 --- a/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md +++ b/vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md @@ -1,6 +1,76 @@ +# v4.0.1 + +## Fixed + + - An attacker could send a JWE containing compressed data that used large + amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. + Those functions now return an error if the decompressed data would exceed + 250kB or 10x the compressed size (whichever is larger). Thanks to + Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) + for reporting. + +# v4.0.0 + +This release makes some breaking changes in order to more thoroughly +address the vulnerabilities discussed in [Three New Attacks Against JSON Web +Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot +token". + +## Changed + + - Limit JWT encryption types (exclude password or public key types) (#78) + - Enforce minimum length for HMAC keys (#85) + - jwt: match any audience in a list, rather than requiring all audiences (#81) + - jwt: accept only Compact Serialization (#75) + - jws: Add expected algorithms for signatures (#74) + - Require specifying expected algorithms for ParseEncrypted, + ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, + jwt.ParseSignedAndEncrypted (#69, #74) + - Usually there is a small, known set of appropriate algorithms for a program + to use and it's a mistake to allow unexpected algorithms. For instance the + "billion hash attack" relies in part on programs accepting the PBES2 + encryption algorithm and doing the necessary work even if they weren't + specifically configured to allow PBES2. + - Revert "Strip padding off base64 strings" (#82) + - The specs require base64url encoding without padding. + - Minimum supported Go version is now 1.21 + +## Added + + - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. 
+ - These allow parsing a specific serialization, as opposed to ParseSigned and + ParseEncrypted, which try to automatically detect which serialization was + provided. It's common to require a specific serialization for a specific + protocol - for instance JWT requires Compact serialization. + +[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf + +# v3.0.3 + +## Fixed + + - Limit decompression output size to prevent a DoS. Backport from v4.0.1. + +# v3.0.2 + +## Fixed + + - DecryptMulti: handle decompression error (#19) + +## Changed + + - jwe/CompactSerialize: improve performance (#67) + - Increase the default number of PBKDF2 iterations to 600k (#48) + - Return the proper algorithm for ECDSA keys (#45) + +## Added + + - Add Thumbprint support for opaque signers (#38) + # v3.0.1 -Fixed: +## Fixed + - Security issue: an attacker specifying a large "p2c" value can cause JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the diff --git a/vendor/github.com/go-jose/go-jose/v3/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md index b90c7e5c6..282cd9e13 100644 --- a/vendor/github.com/go-jose/go-jose/v3/README.md +++ b/vendor/github.com/go-jose/go-jose/v3/README.md @@ -1,10 +1,17 @@ # Go JOSE -[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) -[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) -[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) -[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose) -[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose) +### Versions + +[Version 4](https://github.com/go-jose/go-jose) +([branch](https://github.com/go-jose/go-jose/), +[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version: + + import "github.com/go-jose/go-jose/v4" + +The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which +are deprecated. + +### Summary Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. This includes support for JSON Web Encryption, @@ -21,13 +28,13 @@ US maintained blocked list. ## Overview The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. +[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. Tables of supported algorithms are shown below. The library supports both the compact and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. 
It also comes with a small command-line utility -([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)) +([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util)) for dealing with JOSE messages in a shell. **Note**: We use a forked version of the `encoding/json` package from the Go @@ -36,31 +43,10 @@ of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/curren This is to avoid differences in interpretation of messages between go-jose and libraries in other languages. -### Versions - -[Version 2](https://gopkg.in/go-jose/go-jose.v2) -([branch](https://github.com/go-jose/go-jose/tree/v2), -[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version: - - import "gopkg.in/go-jose/go-jose.v2" - -[Version 3](https://github.com/go-jose/go-jose) -([branch](https://github.com/go-jose/go-jose/tree/master), -[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet): - - import "github.com/go-jose/go-jose/v3" - -All new feature development takes place on the `master` branch, which we are -preparing to release as version 3 soon. Version 2 will continue to receive -critical bug and security fixes. Note that starting with version 3 we are -using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher. - -Version 1 (on the `v1` branch) is frozen and not supported anymore. - ### Supported algorithms See below for a table of supported algorithms. Algorithm identifiers match -the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518) standard where possible. The Godoc reference has a list of constants. Key encryption | Algorithm identifier(s) @@ -103,20 +89,20 @@ allows attaching a key id. Algorithm(s) | Corresponding types :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey) + RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) + ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) + EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) AES, HMAC | []byte 1. Only available in version 2 or later of the package ## Examples -[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) -[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3) +[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v3/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt) Examples can be found in the Godoc reference for this package. 
The -[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util) +[`jose-util`](https://github.com/go-jose/go-jose/tree/v3/jose-util) subdirectory also contains a small command-line utility which might be useful as an example as well. diff --git a/vendor/github.com/go-jose/go-jose/v3/SECURITY.md b/vendor/github.com/go-jose/go-jose/v3/SECURITY.md new file mode 100644 index 000000000..2f18a75a8 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy +This document explains how to contact the Let's Encrypt security team to report security vulnerabilities. + +## Supported Versions +| Version | Supported | +| ------- | ----------| +| >= v3 | ✓ | +| v2 | ✗ | +| v1 | ✗ | + +## Reporting a vulnerability + +Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email. diff --git a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go index 78abc3268..d4d4961b2 100644 --- a/vendor/github.com/go-jose/go-jose/v3/asymmetric.go +++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go @@ -285,6 +285,9 @@ func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm switch alg { case RS256, RS384, RS512: + // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the + // random parameter is legacy and ignored, and it can be nil. + // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) case PS256, PS384, PS512: out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ diff --git a/vendor/github.com/go-jose/go-jose/v3/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go index 6901137e4..8870e8905 100644 --- a/vendor/github.com/go-jose/go-jose/v3/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go @@ -21,7 +21,6 @@ import ( "crypto/rsa" "errors" "fmt" - "reflect" "github.com/go-jose/go-jose/v3/json" ) @@ -76,14 +75,24 @@ type recipientKeyInfo struct { type EncrypterOptions struct { Compression CompressionAlgorithm - // Optional map of additional keys to be inserted into the protected header - // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. + // Optional map of name/value pairs to be inserted into the protected + // header of a JWS object. Some specifications which make use of + // JWS require additional values here. + // + // Values will be serialized by [json.Marshal] and must be valid inputs to + // that function. + // + // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal ExtraHeaders map[HeaderKey]interface{} } // WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. +// if necessary, and returns the updated EncrypterOptions. +// +// The v parameter will be serialized by [json.Marshal] and must be a valid +// input to that function. 
+// +// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { if eo.ExtraHeaders == nil { eo.ExtraHeaders = map[HeaderKey]interface{}{} @@ -111,7 +120,17 @@ func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { // default of 100000 will be used for the count and a 128-bit random salt will // be generated. type Recipient struct { - Algorithm KeyAlgorithm + Algorithm KeyAlgorithm + // Key must have one of these types: + // - ed25519.PublicKey + // - *ecdsa.PublicKey + // - *rsa.PublicKey + // - *JSONWebKey + // - JSONWebKey + // - []byte (a symmetric key) + // - Any type that satisfies the OpaqueKeyEncrypter interface + // + // The type of Key must match the value of Algorithm. Key interface{} KeyID string PBES2Count int @@ -150,16 +169,17 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) switch rcpt.Algorithm { case DIRECT: // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + keyBytes, ok := rawKey.([]byte) + if !ok { return nil, ErrUnsupportedKeyType } - if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + if encrypter.cipher.keySize() != len(keyBytes) { return nil, ErrInvalidKeySize } encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), + key: keyBytes, } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes) recipientInfo.keyID = keyID if rcpt.KeyID != "" { recipientInfo.keyID = rcpt.KeyID @@ -168,16 +188,16 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) return encrypter, nil case ECDH_ES: // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + keyDSA, ok := rawKey.(*ecdsa.PublicKey) + if !ok { return nil, ErrUnsupportedKeyType } encrypter.keyGenerator = ecKeyGenerator{ size: encrypter.cipher.keySize(), algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), + publicKey: keyDSA, } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA) recipientInfo.keyID = keyID if rcpt.KeyID != "" { recipientInfo.keyID = rcpt.KeyID @@ -270,9 +290,8 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey recipient, err := makeJWERecipient(alg, encryptionKey.Key) recipient.keyID = encryptionKey.KeyID return recipient, err - } - if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { - return newOpaqueKeyEncrypter(alg, encrypter) + case OpaqueKeyEncrypter: + return newOpaqueKeyEncrypter(alg, encryptionKey) } return recipientKeyInfo{}, ErrUnsupportedKeyType } @@ -300,11 +319,11 @@ func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { return newDecrypter(decryptionKey.Key) case *JSONWebKey: return newDecrypter(decryptionKey.Key) + case OpaqueKeyDecrypter: + return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil + default: + return nil, ErrUnsupportedKeyType } - if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { - return &opaqueKeyDecrypter{decrypter: okd}, nil - } - return nil, ErrUnsupportedKeyType } // Implementation of encrypt method producing a JWE object. @@ -403,9 +422,27 @@ func (ctx *genericEncrypter) Options() EncrypterOptions { } } -// Decrypt and validate the object and return the plaintext. 
Note that this -// function does not support multi-recipient, if you desire multi-recipient +// Decrypt and validate the object and return the plaintext. This +// function does not support multi-recipient. If you desire multi-recipient // decryption use DecryptMulti instead. +// +// The decryptionKey argument must contain a private or symmetric key +// and must have one of these types: +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - *JSONWebKeySet +// - JSONWebKeySet +// - []byte (a symmetric key) +// - string (a symmetric key) +// - Any type that satisfies the OpaqueKeyDecrypter interface. +// +// Note that ed25519 is only available for signatures, not encryption, so is +// not an option here. +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >10x the size of the compressed data, whichever is larger. func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { headers := obj.mergedHeaders(nil) @@ -462,15 +499,24 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) // The "zip" header parameter may only be present in the protected header. if comp := obj.protected.getCompression(); comp != "" { plaintext, err = decompress(comp, plaintext) + if err != nil { + return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } } - return plaintext, err + return plaintext, nil } // DecryptMulti decrypts and validates the object and returns the plaintexts, // with support for multiple recipients. It returns the index of the recipient // for which the decryption was successful, the merged headers for that recipient, // and the plaintext. +// +// The decryptionKey argument must have one of the types allowed for the +// decryptionKey argument of Decrypt(). +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >3x the size of the compressed data, whichever is larger. func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { globalHeaders := obj.mergedHeaders(nil) @@ -532,7 +578,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade // The "zip" header parameter may only be present in the protected header. if comp := obj.protected.getCompression(); comp != "" { - plaintext, _ = decompress(comp, plaintext) + plaintext, err = decompress(comp, plaintext) + if err != nil { + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) + } } sanitized, err := headers.sanitized() diff --git a/vendor/github.com/go-jose/go-jose/v3/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go index 71ec1c419..0ad40ca08 100644 --- a/vendor/github.com/go-jose/go-jose/v3/doc.go +++ b/vendor/github.com/go-jose/go-jose/v3/doc.go @@ -15,13 +15,11 @@ */ /* - Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. It implements encryption and signing based on the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web Token support available in a sub-package. The library supports both the compact and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. 
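+
+A minimal signing round trip, as a hedged sketch (the signingKey variable and
+the payload are assumptions, not part of this package):
+
+	signer, err := NewSigner(SigningKey{Algorithm: ES256, Key: signingKey}, nil)
+	if err != nil {
+		panic(err)
+	}
+	object, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
+	if err != nil {
+		panic(err)
+	}
+	serialized, err := object.CompactSerialize()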
- */ package jose diff --git a/vendor/github.com/go-jose/go-jose/v3/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go index 968a42496..9f07cfdcb 100644 --- a/vendor/github.com/go-jose/go-jose/v3/encoding.go +++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go @@ -21,6 +21,7 @@ import ( "compress/flate" "encoding/base64" "encoding/binary" + "fmt" "io" "math/big" "strings" @@ -85,7 +86,7 @@ func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { } } -// Compress with DEFLATE +// deflate compresses the input. func deflate(input []byte) ([]byte, error) { output := new(bytes.Buffer) @@ -97,15 +98,27 @@ func deflate(input []byte) ([]byte, error) { return output.Bytes(), err } -// Decompress with DEFLATE +// inflate decompresses the input. +// +// Errors if the decompressed data would be >250kB or >10x the size of the +// compressed data, whichever is larger. func inflate(input []byte) ([]byte, error) { output := new(bytes.Buffer) reader := flate.NewReader(bytes.NewBuffer(input)) - _, err := io.Copy(output, reader) - if err != nil { + maxCompressedSize := 10 * int64(len(input)) + if maxCompressedSize < 250000 { + maxCompressedSize = 250000 + } + + limit := maxCompressedSize + 1 + n, err := io.CopyN(output, reader, limit) + if err != nil && err != io.EOF { return nil, err } + if n == limit { + return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize) + } err = reader.Close() return output.Bytes(), err @@ -189,3 +202,36 @@ func base64URLDecode(value string) ([]byte, error) { value = strings.TrimRight(value, "=") return base64.RawURLEncoding.DecodeString(value) } + +func base64EncodeLen(sl []byte) int { + return base64.RawURLEncoding.EncodedLen(len(sl)) +} + +func base64JoinWithDots(inputs ...[]byte) string { + if len(inputs) == 0 { + return "" + } + + // Count of dots. + totalCount := len(inputs) - 1 + + for _, input := range inputs { + totalCount += base64EncodeLen(input) + } + + out := make([]byte, totalCount) + startEncode := 0 + for i, input := range inputs { + base64.RawURLEncoding.Encode(out[startEncode:], input) + + if i == len(inputs)-1 { + continue + } + + startEncode += base64EncodeLen(input) + out[startEncode] = '.' + startEncode++ + } + + return string(out) +} diff --git a/vendor/github.com/go-jose/go-jose/v3/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go index 4dbc4146c..50634dd84 100644 --- a/vendor/github.com/go-jose/go-jose/v3/json/decode.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/decode.go @@ -75,14 +75,13 @@ import ( // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// “not present,” unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. -// func Unmarshal(data []byte, v interface{}) error { // Check for well-formedness. 
// Avoids filling out half a data structure diff --git a/vendor/github.com/go-jose/go-jose/v3/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go index ea0a13619..98de68ce1 100644 --- a/vendor/github.com/go-jose/go-jose/v3/json/encode.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go @@ -58,6 +58,7 @@ import ( // becomes a member of the object unless // - the field's tag is "-", or // - the field is empty and its tag specifies the "omitempty" option. +// // The empty values are false, 0, any // nil pointer or interface value, and any array, slice, map, or string of // length zero. The object's default key string is the struct field name @@ -65,28 +66,28 @@ import ( // the struct field's tag value is the key name, followed by an optional comma // and options. Examples: // -// // Field is ignored by this package. -// Field int `json:"-"` +// // Field is ignored by this package. +// Field int `json:"-"` // -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` // -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` // -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used // when communicating with JavaScript programs: // -// Int64String int64 `json:",string"` +// Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, dollar signs, percent signs, hyphens, @@ -133,7 +134,6 @@ import ( // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an infinite recursion. 
-// func Marshal(v interface{}) ([]byte, error) { e := &encodeState{} err := e.marshal(v) diff --git a/vendor/github.com/go-jose/go-jose/v3/json/stream.go b/vendor/github.com/go-jose/go-jose/v3/json/stream.go index 9b2b926b0..f03b171e6 100644 --- a/vendor/github.com/go-jose/go-jose/v3/json/stream.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/stream.go @@ -240,7 +240,6 @@ var _ Unmarshaler = (*RawMessage)(nil) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// type Token interface{} const ( diff --git a/vendor/github.com/go-jose/go-jose/v3/jwe.go b/vendor/github.com/go-jose/go-jose/v3/jwe.go index bce304504..4267ac750 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v3/jwe.go @@ -252,13 +252,13 @@ func (obj JSONWebEncryption) CompactSerialize() (string, error) { serializedProtected := mustSerializeJSON(obj.protected) - return fmt.Sprintf( - "%s.%s.%s.%s.%s", - base64.RawURLEncoding.EncodeToString(serializedProtected), - base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey), - base64.RawURLEncoding.EncodeToString(obj.iv), - base64.RawURLEncoding.EncodeToString(obj.ciphertext), - base64.RawURLEncoding.EncodeToString(obj.tag)), nil + return base64JoinWithDots( + serializedProtected, + obj.recipients[0].encryptedKey, + obj.iv, + obj.ciphertext, + obj.tag, + ), nil } // FullSerialize serializes an object using the full JSON serialization format. diff --git a/vendor/github.com/go-jose/go-jose/v3/jwk.go b/vendor/github.com/go-jose/go-jose/v3/jwk.go index 78ff5aca5..e4021959a 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v3/jwk.go @@ -67,9 +67,21 @@ type rawJSONWebKey struct { X5tSHA256 string `json:"x5t#S256,omitempty"` } -// JSONWebKey represents a public or private key in JWK format. +// JSONWebKey represents a public or private key in JWK format. It can be +// marshaled into JSON and unmarshaled from JSON. type JSONWebKey struct { - // Cryptographic key, can be a symmetric or asymmetric key. + // Key is the Go in-memory representation of this key. It must have one + // of these types: + // - ed25519.PublicKey + // - ed25519.PrivateKey + // - *ecdsa.PublicKey + // - *ecdsa.PrivateKey + // - *rsa.PublicKey + // - *rsa.PrivateKey + // - []byte (a symmetric key) + // + // When marshaling this JSONWebKey into JSON, the "kty" header parameter + // will be automatically set based on the type of this field. Key interface{} // Key identifier, parsed from `kid` header. 
KeyID string @@ -389,6 +401,8 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { input, err = rsaThumbprintInput(key.N, key.E) case ed25519.PrivateKey: input, err = edThumbprintInput(ed25519.PublicKey(key[32:])) + case OpaqueSigner: + return key.Public().Thumbprint(hash) default: return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key)) } diff --git a/vendor/github.com/go-jose/go-jose/v3/jws.go b/vendor/github.com/go-jose/go-jose/v3/jws.go index 865f16ad3..e37007dbb 100644 --- a/vendor/github.com/go-jose/go-jose/v3/jws.go +++ b/vendor/github.com/go-jose/go-jose/v3/jws.go @@ -314,15 +314,18 @@ func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) { return "", ErrNotSupported } - serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected)) - payload := "" - signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature) + serializedProtected := mustSerializeJSON(obj.Signatures[0].protected) + var payload []byte if !detached { - payload = base64.RawURLEncoding.EncodeToString(obj.payload) + payload = obj.payload } - return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil + return base64JoinWithDots( + serializedProtected, + payload, + obj.Signatures[0].Signature, + ), nil } // CompactSerialize serializes an object using the compact serialization format. diff --git a/vendor/github.com/go-jose/go-jose/v3/opaque.go b/vendor/github.com/go-jose/go-jose/v3/opaque.go index fc3e8d2ef..68db085ef 100644 --- a/vendor/github.com/go-jose/go-jose/v3/opaque.go +++ b/vendor/github.com/go-jose/go-jose/v3/opaque.go @@ -121,7 +121,7 @@ func (oke *opaqueKeyEncrypter) encryptKey(cek []byte, alg KeyAlgorithm) (recipie return oke.encrypter.encryptKey(cek, alg) } -//OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key. +// OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key. type OpaqueKeyDecrypter interface { DecryptKey(encryptedKey []byte, header Header) ([]byte, error) } diff --git a/vendor/github.com/go-jose/go-jose/v3/shared.go b/vendor/github.com/go-jose/go-jose/v3/shared.go index fc2505e0e..489a04e32 100644 --- a/vendor/github.com/go-jose/go-jose/v3/shared.go +++ b/vendor/github.com/go-jose/go-jose/v3/shared.go @@ -183,8 +183,13 @@ type Header struct { // Unverified certificate chain parsed from x5c header. certificates []*x509.Certificate - // Any headers not recognised above get unmarshalled - // from JSON in a generic manner and placed in this map. + // At parse time, each header parameter with a name other than "kid", + // "jwk", "alg", "nonce", or "x5c" will have its value passed to + // [json.Unmarshal] to unmarshal it into an interface value. + // The resulting value will be stored in this map, with the header + // parameter name as the key. + // + // [json.Unmarshal]: https://pkg.go.dev/encoding/json#Unmarshal ExtraHeaders map[HeaderKey]interface{} } diff --git a/vendor/github.com/go-jose/go-jose/v3/signing.go b/vendor/github.com/go-jose/go-jose/v3/signing.go index 81d55f587..52f3d8560 100644 --- a/vendor/github.com/go-jose/go-jose/v3/signing.go +++ b/vendor/github.com/go-jose/go-jose/v3/signing.go @@ -40,6 +40,15 @@ type Signer interface { } // SigningKey represents an algorithm/key used to sign a message. 
+// +// Key must have one of these types: +// - ed25519.PrivateKey +// - *ecdsa.PrivateKey +// - *rsa.PrivateKey +// - *JSONWebKey +// - JSONWebKey +// - []byte (an HMAC key) +// - Any type that satisfies the OpaqueSigner interface type SigningKey struct { Algorithm SignatureAlgorithm Key interface{} @@ -52,12 +61,22 @@ type SignerOptions struct { // Optional map of additional keys to be inserted into the protected header // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. + // additional values here. + // + // Values will be serialized by [json.Marshal] and must be valid inputs to + // that function. + // + // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal ExtraHeaders map[HeaderKey]interface{} } // WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. +// if necessary, and returns the updated SignerOptions. +// +// The v argument will be serialized by [json.Marshal] and must be a valid +// input to that function. +// +// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions { if so.ExtraHeaders == nil { so.ExtraHeaders = map[HeaderKey]interface{}{} @@ -173,11 +192,11 @@ func newVerifier(verificationKey interface{}) (payloadVerifier, error) { return newVerifier(verificationKey.Key) case *JSONWebKey: return newVerifier(verificationKey.Key) + case OpaqueVerifier: + return &opaqueVerifier{verifier: verificationKey}, nil + default: + return nil, ErrUnsupportedKeyType } - if ov, ok := verificationKey.(OpaqueVerifier); ok { - return &opaqueVerifier{verifier: ov}, nil - } - return nil, ErrUnsupportedKeyType } func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error { @@ -204,11 +223,11 @@ func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipient return newJWKSigner(alg, signingKey) case *JSONWebKey: return newJWKSigner(alg, *signingKey) + case OpaqueSigner: + return newOpaqueSigner(alg, signingKey) + default: + return recipientSigInfo{}, ErrUnsupportedKeyType } - if signer, ok := signingKey.(OpaqueSigner); ok { - return newOpaqueSigner(alg, signer) - } - return recipientSigInfo{}, ErrUnsupportedKeyType } func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) { @@ -321,12 +340,21 @@ func (ctx *genericSigner) Options() SignerOptions { } // Verify validates the signature on the object and returns the payload. -// This function does not support multi-signature, if you desire multi-sig +// This function does not support multi-signature. If you desire multi-signature // verification use VerifyMulti instead. // // Be careful when verifying signatures based on embedded JWKs inside the // payload header. You cannot assume that the key received in a payload is // trusted. +// +// The verificationKey argument must have one of these types: +// - ed25519.PublicKey +// - *ecdsa.PublicKey +// - *rsa.PublicKey +// - *JSONWebKey +// - JSONWebKey +// - []byte (an HMAC key) +// - Any type that implements the OpaqueVerifier interface. func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) { err := obj.DetachedVerify(obj.payload, verificationKey) if err != nil { @@ -346,6 +374,9 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte { // most cases, you will probably want to use Verify instead. 
DetachedVerify // is only useful if you have a payload and signature that are separated from // each other. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error { key := tryJWKS(verificationKey, obj.headers()...) verifier, err := newVerifier(key) @@ -388,6 +419,9 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter // returns the index of the signature that was verified, along with the signature // object and the payload. We return the signature and index to guarantee that // callers are getting the verified value. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) { idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey) if err != nil { @@ -405,6 +439,9 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa // DetachedVerifyMulti is only useful if you have a payload and signature that are // separated from each other, and the signature can have multiple signers at the // same time. +// +// The verificationKey argument must have one of the types allowed for the +// verificationKey argument of JSONWebSignature.Verify(). func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) { key := tryJWKS(verificationKey, obj.headers()...) verifier, err := newVerifier(key) diff --git a/vendor/github.com/go-jose/go-jose/v3/symmetric.go b/vendor/github.com/go-jose/go-jose/v3/symmetric.go index 1ffd2708b..10d8e19fd 100644 --- a/vendor/github.com/go-jose/go-jose/v3/symmetric.go +++ b/vendor/github.com/go-jose/go-jose/v3/symmetric.go @@ -40,12 +40,17 @@ var RandReader = rand.Reader const ( // RFC7518 recommends a minimum of 1,000 iterations: - // https://tools.ietf.org/html/rfc7518#section-4.8.1.2 + // - https://tools.ietf.org/html/rfc7518#section-4.8.1.2 + // // NIST recommends a minimum of 10,000: - // https://pages.nist.gov/800-63-3/sp800-63b.html - // 1Password uses 100,000: - // https://support.1password.com/pbkdf2/ - defaultP2C = 100000 + // - https://pages.nist.gov/800-63-3/sp800-63b.html + // + // 1Password increased in 2023 from 100,000 to 650,000: + // - https://support.1password.com/pbkdf2/ + // + // OWASP recommended 600,000 in Dec 2022: + // - https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 + defaultP2C = 600000 // Default salt size: 128 bits defaultP2SSize = 16 ) diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
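An editorial aside before the go-logr/stdr sources that follow: the expanded go-jose doc comments earlier in this diff now enumerate the concrete Go types accepted as signing and verification keys (see the SigningKey and Verify documentation above). The sketch below is illustrative only and not part of this change; it assumes the vendored go-jose/v3 import path and uses *rsa.PrivateKey and *rsa.PublicKey from the documented type lists.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// *rsa.PrivateKey is one of the documented SigningKey.Key types.
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte("hello jose"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// *rsa.PublicKey is one of the documented verification key types.
	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified payload: %s\n", payload)
}
```

As the updated Verify documentation warns, ParseSigned performs no validation on its own: trust comes solely from the key handed to Verify, never from a JWK embedded in the message headers.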
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md new file mode 100644 index 000000000..515866789 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/README.md @@ -0,0 +1,6 @@ +# Minimal Go logging using logr and Go's standard library + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) + +This package implements the [logr interface](https://github.com/go-logr/logr) +in terms of Go's standard [log package](https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go new file mode 100644 index 000000000..93a8aab51 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/stdr.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package stdr implements github.com/go-logr/logr.Logger in terms of +// Go's standard log package. +package stdr + +import ( + "log" + "os" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" +) + +// The global verbosity level. See SetVerbosity(). +var globalVerbosity int + +// SetVerbosity sets the global level against which all info logs will be +// compared. If this is greater than or equal to the "V" of the logger, the +// message will be logged. A higher value here means more logs will be written. +// The previous verbosity value is returned. This is not concurrent-safe - +// callers must be sure to call it from only one goroutine. +func SetVerbosity(v int) int { + old := globalVerbosity + globalVerbosity = v + return old +} + +// New returns a logr.Logger which is implemented by Go's standard log package, +// or something like it. If std is nil, this will use a default logger +// instead. +// +// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) +func New(std StdLogger) logr.Logger { + return NewWithOptions(std, Options{}) +} + +// NewWithOptions returns a logr.Logger which is implemented by Go's standard +// log package, or something like it. See New for details. +func NewWithOptions(std StdLogger, opts Options) logr.Logger { + if std == nil { + // Go's log.Default() is only available in 1.16 and higher. + std = log.New(os.Stderr, "", log.LstdFlags) + } + + if opts.Depth < 0 { + opts.Depth = 0 + } + + fopts := funcr.Options{ + LogCaller: funcr.MessageClass(opts.LogCaller), + } + + sl := &logger{ + Formatter: funcr.NewFormatter(fopts), + std: std, + } + + // For skipping our own logger.Info/Error. + sl.Formatter.AddCallDepth(1 + opts.Depth) + + return logr.New(sl) +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // Depth biases the assumed number of call frames to the "true" caller. + // This is useful when the calling code calls a function which then calls + // stdr (e.g. a logging shim to another API). Values less than zero will + // be treated as zero. + Depth int + + // LogCaller tells stdr to add a "caller" key to some or all log lines.
+ // Go's log package has options to log this natively, too. + LogCaller MessageClass + + // TODO: add an option to log the date/time +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// StdLogger is the subset of the Go stdlib log.Logger API that is needed for +// this adapter. +type StdLogger interface { + // Output is the same as log.Output and log.Logger.Output. + Output(calldepth int, logline string) error +} + +type logger struct { + funcr.Formatter + std StdLogger +} + +var _ logr.LogSink = &logger{} +var _ logr.CallDepthLogSink = &logger{} + +func (l logger) Enabled(level int) bool { + return globalVerbosity >= level +} + +func (l logger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l logger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l logger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +// Underlier exposes access to the underlying logging implementation. Since +// callers only have a logr.Logger, they have to know which implementation is +// in use, so this interface is less of an abstraction and more of a way to test +// type conversion. +type Underlier interface { + GetUnderlying() StdLogger +} + +// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger +// is itself an interface, the result may or may not be a Go log.Logger.
+func (l logger) GetUnderlying() StdLogger { + return l.std +} diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml index 4e1fc0c7d..cf88ead32 100644 --- a/vendor/github.com/go-openapi/errors/.golangci.yml +++ b/vendor/github.com/go-openapi/errors/.golangci.yml @@ -4,45 +4,59 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 + linters: enable-all: true disable: + - errname # this repo doesn't follow the convention advised by this linter - maligned + - unparam - lll + - gochecknoinits - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint + - gofumpt - paralleltest - tparallel - - cyclop - - errname - - varnamelen + - thelper + - ifshort - exhaustruct - - maintidx + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md index 4aac049e6..6d57ea55c 100644 --- a/vendor/github.com/go-openapi/errors/README.md +++ b/vendor/github.com/go-openapi/errors/README.md @@ -1,11 +1,8 @@ -# OpenAPI errors +# OpenAPI errors [![Build Status](https://github.com/go-openapi/errors/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) -[![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) -[![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/errors.svg)](https://pkg.go.dev/github.com/go-openapi/errors) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/errors.svg)](https://golangci.com) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. 
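The go-logr/stdr adapter added above is small enough to exercise end to end. A minimal usage sketch, relying only on the exported API shown in stdr.go (SetVerbosity, New) and the standard logr.Logger methods; the logger name and key/value pairs are illustrative:

```go
package main

import (
	"errors"
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	// Global verbosity gates Logger.V(n) calls. Per SetVerbosity's doc
	// comment this is not concurrent-safe, so set it once at startup.
	stdr.SetVerbosity(1)

	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags)).
		WithName("demo").
		WithValues("component", "vendoring")

	logger.Info("hello from stdr", "podman", "v5")
	logger.V(1).Info("shown because verbosity >= 1")
	logger.V(2).Info("suppressed because verbosity < 2")
	logger.Error(errors.New("boom"), "something failed", "retryable", false)
}
```

Because the sink methods use value receivers and return a modified copy, WithName and WithValues derive new loggers without mutating the base one.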
diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go index c13f3435f..5320cb963 100644 --- a/vendor/github.com/go-openapi/errors/api.go +++ b/vendor/github.com/go-openapi/errors/api.go @@ -55,9 +55,15 @@ func (a apiError) MarshalJSON() ([]byte, error) { // New creates a new API error with a code and a message func New(code int32, message string, args ...interface{}) Error { if len(args) > 0 { - return &apiError{code, fmt.Sprintf(message, args...)} + return &apiError{ + code: code, + message: fmt.Sprintf(message, args...), + } + } + return &apiError{ + code: code, + message: message, } - return &apiError{code, message} } // NotFound creates a new not found error @@ -130,10 +136,14 @@ func flattenComposite(errs *CompositeError) *CompositeError { // MethodNotAllowed creates a new method not allowed error func MethodNotAllowed(requested string, allow []string) Error { msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) - return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg} + return &MethodNotAllowedError{ + code: http.StatusMethodNotAllowed, + Allowed: allow, + message: msg, + } } -// ServeError the error handler interface implementation +// ServeError implements the http error handler interface func ServeError(rw http.ResponseWriter, r *http.Request, err error) { rw.Header().Set("Content-Type", "application/json") switch e := err.(type) { diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go index da5f6c78c..cf7ac2ed4 100644 --- a/vendor/github.com/go-openapi/errors/schema.go +++ b/vendor/github.com/go-openapi/errors/schema.go @@ -120,6 +120,10 @@ func (c *CompositeError) Error() string { return c.message } +func (c *CompositeError) Unwrap() []error { + return c.Errors +} + // MarshalJSON implements the JSON encoding interface func (c CompositeError) MarshalJSON() ([]byte, error) { return json.Marshal(map[string]interface{}{ @@ -133,7 +137,7 @@ func (c CompositeError) MarshalJSON() ([]byte, error) { func CompositeValidationError(errors ...error) *CompositeError { return &CompositeError{ code: CompositeErrorCode, - Errors: append([]error{}, errors...), + Errors: append(make([]error, 0, len(errors)), errors...), message: "validation failure list", } } diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml index be4899cb1..22f8d21cc 100644 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -4,56 +4,58 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 31 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 linters: - enable: - - revive - - goimports - - gosec + enable-all: true + disable: + - maligned - unparam - - unconvert - - predeclared - - prealloc - - misspell - - # disable: - # - maligned - # - lll - # - gochecknoinits - # - gochecknoglobals - # - godox - # - gocognit - # - whitespace - # - wsl - # - funlen - # - wrapcheck - # - testpackage - # - nlreturn - # - gofumpt - # - goerr113 - # - gci - # - gomnd - # - godot - # - exhaustivestruct - # - paralleltest - # - varnamelen - # - ireturn - # - exhaustruct - # #- thelper - -issues: - exclude-rules: - - path: bson.go - text: "should be .*ObjectID" - linters: - - golint - - stylecheck - + - lll + - gochecknoinits + 
- gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md index 0cf89d776..f6b39c6c5 100644 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ b/vendor/github.com/go-openapi/strfmt/README.md @@ -1,8 +1,7 @@ -# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - +# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) This package exposes a registry of data types to support string formats in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go index a8a3604a2..cfa9a526f 100644 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -39,10 +39,10 @@ func IsBSONObjectID(str string) bool { // ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) // // swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID //nolint:revive +type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck // NewObjectId creates a ObjectId from a Hex String -func NewObjectId(hex string) ObjectId { //nolint:revive +func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck oid, err := bsonprim.ObjectIDFromHex(hex) if err != nil { panic(err) @@ -135,7 +135,7 @@ func (id *ObjectId) UnmarshalBSON(data []byte) error { // BSON document if the error is nil. 
func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { oid := bsonprim.ObjectID(id) - return bsontype.ObjectID, oid[:], nil + return bson.TypeObjectID, oid[:], nil } // UnmarshalBSONValue is an interface implemented by types that can unmarshal a diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go index a89a4de3f..281371406 100644 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -25,6 +25,7 @@ import ( "strings" "github.com/asaskevich/govalidator" + "github.com/google/uuid" "go.mongodb.org/mongo-driver/bson" ) @@ -57,24 +58,35 @@ const ( // - long top-level domain names (e.g. example.london) are permitted // - symbol unicode points are permitted (e.g. emoji) (not for top-level domain) HostnamePattern = `^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$` + + // json null type + jsonNull = "null" +) + +const ( // UUIDPattern Regex for UUID that allows uppercase - UUIDPattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$` + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUIDPattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{32}$)` + // UUID3Pattern Regex for UUID3 that allows uppercase - UUID3Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$` + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID3Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$)|(^[0-9a-f]{12}3[0-9a-f]{3}?[0-9a-f]{16}$)` + // UUID4Pattern Regex for UUID4 that allows uppercase - UUID4Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$` + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. + UUID4Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}$)` + // UUID5Pattern Regex for UUID5 that allows uppercase - UUID5Pattern = `(?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$` - // json null type - jsonNull = "null" + // + // Deprecated: strfmt no longer uses regular expressions to validate UUIDs. 
+ UUID5Pattern = `(?i)(^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$)|(^[0-9a-f]{12}5[0-9a-f]{3}[89ab][0-9a-f]{15}$)` ) var ( rxHostname = regexp.MustCompile(HostnamePattern) - rxUUID = regexp.MustCompile(UUIDPattern) - rxUUID3 = regexp.MustCompile(UUID3Pattern) - rxUUID4 = regexp.MustCompile(UUID4Pattern) - rxUUID5 = regexp.MustCompile(UUID5Pattern) ) // IsHostname returns true when the string is a valid hostname @@ -99,24 +111,28 @@ func IsHostname(str string) bool { return valid } -// IsUUID returns true is the string matches a UUID, upper case is allowed +// IsUUID returns true if the string matches a UUID (in any version, including v6 and v7), upper case is allowed func IsUUID(str string) bool { - return rxUUID.MatchString(str) + _, err := uuid.Parse(str) + return err == nil } -// IsUUID3 returns true is the string matches a UUID, upper case is allowed +// IsUUID3 returns true if the string matches a UUID v3, upper case is allowed func IsUUID3(str string) bool { - return rxUUID3.MatchString(str) + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(3) } -// IsUUID4 returns true is the string matches a UUID, upper case is allowed +// IsUUID4 returns true if the string matches a UUID v4, upper case is allowed func IsUUID4(str string) bool { - return rxUUID4.MatchString(str) + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(4) } -// IsUUID5 returns true is the string matches a UUID, upper case is allowed +// IsUUID5 returns true if the string matches a UUID v5, upper case is allowed func IsUUID5(str string) bool { - return rxUUID5.MatchString(str) + id, err := uuid.Parse(str) + return err == nil && id.Version() == uuid.Version(5) } // IsEmail validates an email address. diff --git a/vendor/github.com/go-openapi/strfmt/format.go b/vendor/github.com/go-openapi/strfmt/format.go index ad3b3c355..888e107c3 100644 --- a/vendor/github.com/go-openapi/strfmt/format.go +++ b/vendor/github.com/go-openapi/strfmt/format.go @@ -16,6 +16,7 @@ package strfmt import ( "encoding" + stderrors "errors" "fmt" "reflect" "strings" @@ -94,7 +95,7 @@ func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry { } // MapStructureHookFunc is a decode hook function for mapstructure -func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //nolint:gocyclo,cyclop +func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { return func(from reflect.Type, to reflect.Type, obj interface{}) (interface{}, error) { if from.Kind() != reflect.String { return obj, nil @@ -117,7 +118,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { // case "datetime": input := data if len(input) == 0 { - return nil, fmt.Errorf("empty string is an invalid datetime format") + return nil, stderrors.New("empty string is an invalid datetime format") } return ParseDateTime(input) case "duration": diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go index 9bef4c3b3..f08ba4da5 100644 --- a/vendor/github.com/go-openapi/strfmt/time.go +++ b/vendor/github.com/go-openapi/strfmt/time.go @@ -76,6 +76,8 @@ const ( ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04" // ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern.
ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05" + // short form of ISO8601TimeUniversalSortableDateTimePattern + ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02" // DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6 DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$` ) @@ -84,7 +86,7 @@ var ( rxDateTime = regexp.MustCompile(DateTimePattern) // DateTimeFormats is the collection of formats used by ParseDateTime() - DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern} + DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm} // MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds) MarshalFormat = RFC3339Millis @@ -245,7 +247,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { buf := make([]byte, 8) binary.LittleEndian.PutUint64(buf, uint64(i64)) - return bsontype.DateTime, buf, nil + return bson.TypeDateTime, buf, nil } // UnmarshalBSONValue is an interface implemented by types that can unmarshal a @@ -253,7 +255,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) { // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. 
func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { - if tpe == bsontype.Null { + if tpe == bson.TypeNull { *t = DateTime{} return nil } diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore index d69b53acc..c4b1b64f0 100644 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -2,3 +2,4 @@ secrets.yml vendor Godeps .idea +*.out diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index bf503e400..80e2be004 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -4,14 +4,14 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 25 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 3 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -20,35 +20,41 @@ linters: - lll - gochecknoinits - gochecknoglobals - - nlreturn - - testpackage + - funlen + - godox + - gocognit + - whitespace + - wsl - wrapcheck + - testpackage + - nlreturn - gomnd - - exhaustive - exhaustivestruct - goerr113 - - wsl - - whitespace - - gofumpt - - godot + - errorlint - nestif - - godox - - funlen - - gci - - gocognit + - godot + - gofumpt - paralleltest + - tparallel - thelper - ifshort - - gomoddirectives - - cyclop - - forcetypeassert - - ireturn - - tagliatelle - - varnamelen - - goimports - - tenv - - golint - exhaustruct - - nilnil + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 000000000..e7f28ed6b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. 
+ +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index 217f6fa50..a72922299 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,7 +1,8 @@ -# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) Contains a bunch of helper functions for go-openapi and go-swagger projects. @@ -18,4 +19,5 @@ You may also use it standalone for your projects. This repo has only few dependencies outside of the standard library: -* YAML utilities depend on gopkg.in/yaml.v2 +* YAML utilities depend on `gopkg.in/yaml.v3` +* `github.com/mailru/easyjson v0.7.7` diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go new file mode 100644 index 000000000..20a359bb6 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "sort" + "strings" + "sync" +) + +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. + commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms adds additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map.
+type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, _ interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byInitialism(result))) + return +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 00038c377..783442fdd 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -21,6 +21,7 @@ import ( "net/http" "net/url" "os" + "path" "path/filepath" "runtime" "strings" @@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = "" var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +func LoadFromFileOrHTTP(pth string) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) } -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { +// LoadStrategy returns a loader function for a given path or URI. +// +// The load strategy returns the remote load for any path starting with `http`. +// So this works for any URI with a scheme `http` or `https`. +// +// The fallback strategy is to call the local loader. +// +// The local loader takes a local file system path (absolute or relative) as argument, +// or alternatively a `file://...` URI, **without host** (see also below for windows). +// +// There are a few liberalities, initially intended to be tolerant regarding the URI syntax, +// especially on windows. +// +// Before the local loader is called, the given path is transformed: +// - percent-encoded characters are unescaped +// - simple paths (e.g. 
`./folder/file`) are passed as-is +// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too. +// +// For paths provided as URIs with the "file" scheme, please note that: +// - `file://` is simply stripped. +// This means that the host part of the URI is not parsed at all. +// For example, `file:///folder/file" becomes "/folder/file`, +// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems. +// Similarly, `file://./folder/file` yields `./folder/file`. +// - on windows, `file://...` can take a host so as to specify an UNC share location. +// +// Reminder about windows-specifics: +// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) +// - `file:///c:/folder/file` becomes `C:\folder\file` +// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` +func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(pth, "http") { return remote } - return func(pth string) ([]byte, error) { - upth, err := pathUnescape(pth) + + return func(p string) ([]byte, error) { + upth, err := url.PathUnescape(p) if err != nil { return nil, err } - if strings.HasPrefix(pth, `file://`) { - if runtime.GOOS == "windows" { - // support for canonical file URIs on windows. - // Zero tolerance here for dodgy URIs. - u, _ := url.Parse(upth) - if u.Host != "" { - // assume UNC name (volume share) - // file://host/share/folder\... ==> \\host\share\path\folder - // NOTE: UNC port not yet supported - upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) - } else { - // file:///c:/folder/... ==> just remove the leading slash - upth = strings.TrimPrefix(upth, `file:///`) - } - } else { - upth = strings.TrimPrefix(upth, `file://`) + if !strings.HasPrefix(p, `file://`) { + // regular file path provided: just normalize slashes + return local(filepath.FromSlash(upth)) + } + + if runtime.GOOS != "windows" { + // crude processing: this leaves full URIs with a host with a (mostly) unexpected result + upth = strings.TrimPrefix(upth, `file://`) + + return local(filepath.FromSlash(upth)) + } + + // windows-only pre-processing of file://... URIs + + // support for canonical file URIs on windows. + u, err := url.Parse(filepath.ToSlash(upth)) + if err != nil { + return nil, err + } + + if u.Host != "" { + // assume UNC name (volume share) + // NOTE: UNC port not yet supported + + // when the "host" segment is a drive letter: + // file://C:/folder/... => C:\folder + upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`)) + if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' { + // tolerance: if we have a leading dot, this can't be a host + // file://host/share/folder\... ==> \\host\share\path\folder + upth = "//" + upth + } + } else { + // no host, let's figure out if this is a drive letter + upth = strings.TrimPrefix(upth, `file://`) + first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/") + if strings.HasSuffix(first, ":") { + // drive letter in the first segment: + // file:///c:/folder/... 
==> strip the leading slash + upth = strings.TrimPrefix(upth, `/`) } } diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9bb..8bb64ac32 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string + kind lexemKind } +) - casualNameLexem struct { - original string - } +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go deleted file mode 100644 index 7c7da9c08..000000000 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.9 -// +build go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. 
-// Since go1.9, this may be implemented with sync.Map. -type indexOfInitialisms struct { - sortMutex *sync.Mutex - index *sync.Map -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - sortMutex: new(sync.Mutex), - index: new(sync.Map), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - for k, v := range initial { - m.index.Store(k, v) - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - _, ok := m.index.Load(key) - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.index.Store(key, true) - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - m.index.Range(func(key, value interface{}) bool { - k := key.(string) - result = append(result, k) - return true - }) - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go deleted file mode 100644 index 0565db377..000000000 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Before go1.9, this may be implemented with a mutex on the map. 
-type indexOfInitialisms struct { - getMutex *sync.Mutex - index map[string]bool -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - getMutex: new(sync.Mutex), - index: make(map[string]bool, 50), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k, v := range initial { - m.index[k] = v - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - m.getMutex.Lock() - defer m.getMutex.Unlock() - _, ok := m.index[key] - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - m.index[key] = true - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k := range m.index { - result = append(result, k) - } - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index a1825fb7d..274727a86 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. + + matchesPool struct { + *sync.Pool } - splitterOption func(*splitter) *splitter + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } ) -// split calls the splitter; splitter provides more control and post options +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. 
+// +// Use newSplitter for more control and options func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) - for _, lexem := range lexems { + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter + return s } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { +func withPostSplitInitialismCheck(s *splitter) { s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) } - initialismMatches []*initialismMatch -) -func (s *splitter) toNameLexems(name string) []nameLexem { + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). 
+ newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } } + + match.complete = true + match.end = currentRunePosition } - match.complete = true - match.end = currentRunePosition + *newMatches = append(*newMatches, match) } - - newMatches = append(newMatches, match) } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) 
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) - } - - if lastAcceptedMatch.end+1 != len(nameRunes) { + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) + s.appendBrokenDownCasualString(nameLexems, rest) } - return nameLexems -} + poolOfMatches.RedeemMatches(matches) -func (s *splitter) initialismRuneEqual(a, b rune) bool { - return a == b + return nameLexems } -func (s *splitter) breakInitialism(original string) nameLexem { +func (s splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s *splitter) breakCasualString(str []rune) []nameLexem { - segments := make([]nameLexem, 0) - currentSegment := "" +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() addCasualNameLexem := func(original string) { - segments = append(segments, newCasualNameLexem(original)) + *segments = append(*segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - segments = append(segments, newInitialismNameLexem(original, match)) + *segments = append(*segments, newInitialismNameLexem(original, match)) } - addNameLexem := func(original string) { - if s.postSplitInitialismCheck { - for _, initialism := range s.initialisms { - if upper(initialism) == upper(original) { - addInitialismNameLexem(original, initialism) + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + return } } - } - addCasualNameLexem(original) + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem } - for _, rn := range string(str) { - if replace, found := nameReplaceTable[rn]; found { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } if replace != "" { @@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem { } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if currentSegment != "" { - 
addNameLexem(currentSegment) - currentSegment = "" + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } continue } if unicode.IsUpper(rn) { - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - currentSegment = "" + currentSegment.Reset() } - currentSegment += string(rn) + currentSegment.WriteRune(rn) + } + + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. +func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } - if currentSegment != "" { - addNameLexem(currentSegment) + i += size } - return segments + return true } diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 000000000..c52d6bf71 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,22 @@ +package swag + +import "unsafe" + +type internalString struct { + Data unsafe.Pointer + Len int +} + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + p := (*internalString)(unsafe.Pointer(&str)).Data + return unsafe.Slice((*byte)(p), len(str)) +} + +/* + * go1.20 version (for when go mod moves to a go1.20 requirement): + +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} +*/ diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index d971fbe34..5051401c4 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. 
-var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { - return strings.Trim(str, " ") + return strings.TrimSpace(str) } // Shortcut to strings.ToUpper() @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,33 +162,40 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { - out = append(out, w.GetOriginal()) + out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // 
ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(in)) - for _, w := range in { - original := w.GetOriginal() + out := make([]string, 0, len(*in)) + for _, w := range *in { + original := trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) } else { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -264,7 +209,7 @@ func ToJSONName(name string) string { out = append(out, lower(w)) continue } - out = append(out, Camelize(w)) + out = append(out, Camelize(trim(w))) } return strings.Join(out, "") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } - result := "" - for _, lexem := range lexems { + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -343,7 +323,7 @@ type zeroable interface { func IsZero(data interface{}) bool { v := reflect.ValueOf(data) // check for nil data - switch v.Kind() { + switch v.Kind() { 
//nolint:exhaustive case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: if v.IsNil() { return true @@ -356,7 +336,7 @@ func IsZero(data interface{}) bool { } // continue with slightly more complex reflection - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.String: return v.Len() == 0 case reflect.Bool: @@ -376,16 +356,6 @@ func IsZero(data interface{}) bool { } } -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() -} - // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription string diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index f09ee609f..f59e02593 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -16,8 +16,11 @@ package swag import ( "encoding/json" + "errors" "fmt" "path/filepath" + "reflect" + "sort" "strconv" "github.com/mailru/easyjson/jlexer" @@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, fmt.Errorf("only YAML documents that are objects are supported") + return nil, errors.New("only YAML documents that are objects are supported") } return &document, nil } @@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlTimestamp: return node.Value, nil case yamlNull: - return nil, nil + return nil, nil //nolint:nilnil default: return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) } @@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } +func isNil(input interface{}) bool { + if input == nil { + return true + } + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func json2yaml(item interface{}) (*yaml.Node, error) { + if isNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + switch val := item.(type) { case JSONMapSlice: var n yaml.Node @@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) { case map[string]interface{}: var n yaml.Node n.Kind = yaml.MappingNode - for k, v := range val { + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] childNode, err := json2yaml(v) if err != nil { return nil, err @@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlBoolScalar, Value: strconv.FormatBool(val), }, nil + default: + return nil, fmt.Errorf("unhandled type: %T", val) } - return nil, nil } // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index 7ed347d3a..7ec5ac7ea 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant 
([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
 ## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
index b404f4bec..dc60082d3 100644
--- a/vendor/github.com/google/uuid/hash.go
+++ b/vendor/github.com/google/uuid/hash.go
@@ -17,6 +17,12 @@ var (
 	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
 	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
 	Nil           UUID // empty UUID, all zeros
+
+	// The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+	Max = UUID{
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	}
 )
 
 // NewHash returns a new UUID derived from the hash of space concatenated with
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
index e6ef06cdc..c35112927 100644
--- a/vendor/github.com/google/uuid/time.go
+++ b/vendor/github.com/google/uuid/time.go
@@ -108,12 +108,23 @@ func setClockSequence(seq int) {
 }
 
 // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid. The time is only defined for version 1 and 2 UUIDs.
+// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
 func (uuid UUID) Time() Time {
-	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
-	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
-	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
-	return Time(time)
+	var t Time
+	switch uuid.Version() {
+	case 6:
+		time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+		t = Time(time)
+	case 7:
+		time := binary.BigEndian.Uint64(uuid[:8])
+		t = Time((time>>16)*10000 + g1582ns100)
+	default: // forward compatible
+		time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+		time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+		time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+		t = Time(time)
+	}
+	return t
 }
 
 // ClockSequence returns the clock sequence encoded in uuid.
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index dc75f7d99..5232b4867 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -186,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
 	return uuid
 }
 
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+//	xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//	urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//	xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+//	{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
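Since `Validate` is the headline addition of the v1.5.0 bump noted in the changelog above, a short hedged sketch of its use may help. The sample strings are arbitrary; unlike `uuid.Parse`, no `UUID` value is allocated:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Validate accepts the standard, urn-prefixed, braced,
	// and hyphen-less forms listed in its doc comment.
	for _, s := range []string{
		"f47ac10b-58cc-4372-8567-0e02b2c3d479",
		"urn:uuid:f47ac10b-58cc-4372-8567-0e02b2c3d479",
		"{f47ac10b-58cc-4372-8567-0e02b2c3d479}",
		"not-a-uuid",
	} {
		if err := uuid.Validate(s); err != nil {
			fmt.Printf("%q: %v\n", s, err)
			continue
		}
		fmt.Printf("%q: valid\n", s)
	}
}
```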
+func Validate(s string) error {
+	switch len(s) {
+	// Standard UUID format
+	case 36:
+
+	// UUID with "urn:uuid:" prefix
+	case 36 + 9:
+		if !strings.EqualFold(s[:9], "urn:uuid:") {
+			return fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// UUID enclosed in braces
+	case 36 + 2:
+		if s[0] != '{' || s[len(s)-1] != '}' {
+			return fmt.Errorf("invalid bracketed UUID format")
+		}
+		s = s[1 : len(s)-1]
+
+	// UUID without hyphens
+	case 32:
+		for i := 0; i < len(s); i += 2 {
+			_, ok := xtob(s[i], s[i+1])
+			if !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+
+	default:
+		return invalidLengthError{len(s)}
+	}
+
+	// Check for standard UUID format
+	if len(s) == 36 {
+		if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+			return errors.New("invalid UUID format")
+		}
+		for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+			if _, ok := xtob(s[x], s[x+1]); !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+	}
+
+	return nil
+}
+
 // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 // , or "" if uuid is invalid.
 func (uuid UUID) String() string {
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 000000000..339a959a7
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 falls back to random NodeID bits automatically. If the clock
+// sequence has not been set by SetClockSequence then it will be set
+// automatically. If GetTime fails to return the current time, NewV6 returns
+// Nil and an error.
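A hedged sketch of the new constructor described above, paired with the `Time()` change from earlier in this diff; the exact output depends on the host's node ID and clock:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewV6()
	if err != nil {
		log.Fatal(err)
	}

	// The time.go hunk above teaches Time() about version 6, so the
	// embedded timestamp can be recovered from the reordered fields.
	sec, nsec := u.Time().UnixTime()
	fmt.Println(u, u.Version(), sec, nsec)
}
```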
+func NewV6() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	/*
+	    0                   1                   2                   3
+	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                           time_high                           |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |           time_mid            |      time_low_and_version     |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |clk_seq_hi_res |  clk_seq_low  |         node (0-1)            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                         node (2-5)                            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+
+	binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	uuid[6] = 0x60 | (uuid[6] & 0x0F)
+	uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 000000000..3167b643d
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well-known Unix Epoch timestamp source: the number of
+// milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded. It also
+// offers improved entropy characteristics over versions 1 and 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID versions 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+	uuid, err := NewRandom()
+	if err != nil {
+		return uuid, err
+	}
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
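A hedged sketch of `NewV7`; the monotonicity guarantee provided by `getV7Time` below (see #150 in the changelog) is what makes the sortedness check meaningful:

```go
package main

import (
	"fmt"
	"log"
	"sort"

	"github.com/google/uuid"
)

func main() {
	ids := make([]string, 0, 3)
	for i := 0; i < 3; i++ {
		u, err := uuid.NewV7()
		if err != nil {
			log.Fatal(err)
		}
		ids = append(ids, u.String())
	}

	// V7 UUIDs generated in sequence should sort in creation order,
	// since the leading bits encode a monotonic millisecond timestamp.
	fmt.Println(sort.StringsAreSorted(ids)) // expected: true
}
```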
+func NewV7FromReader(r io.Reader) (UUID, error) {
+	uuid, err := NewRandomFromReader(r)
+	if err != nil {
+		return uuid, err
+	}
+
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets version b0111 (uuid[6]).
+// uuid[8] already has the right variant bits set (the Variant is 10);
+// see functions NewV7 and NewV7FromReader.
+func makeV7(uuid []byte) {
+	/*
+	    0                   1                   2                   3
+	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                           unix_ts_ms                          |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |          unix_ts_ms           |  ver  |  rand_a (12 bit seq)  |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |var|                        rand_b                             |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                            rand_b                             |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+	_ = uuid[15] // bounds check
+
+	t, s := getV7Time()
+
+	uuid[0] = byte(t >> 40)
+	uuid[1] = byte(t >> 32)
+	uuid[2] = byte(t >> 24)
+	uuid[3] = byte(t >> 16)
+	uuid[4] = byte(t >> 8)
+	uuid[5] = byte(t)
+
+	uuid[6] = 0x70 | (0x0F & byte(s>>8))
+	uuid[7] = byte(s)
+}
+
+// lastV7time is the last time we returned, stored as:
+//
+//	52 bits of time in milliseconds since epoch
+//	12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+	timeMu.Lock()
+	defer timeMu.Unlock()
+
+	nano := timeNow().UnixNano()
+	milli = nano / nanoPerMilli
+	// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+	seq = (nano - milli*nanoPerMilli) >> 8
+	now := milli<<12 + seq
+	if now <= lastV7time {
+		now = lastV7time + 1
+		milli = now >> 12
+		seq = now & 0xfff
+	}
+	lastV7time = now
+	return milli, seq
+}
diff --git a/vendor/github.com/gorilla/schema/.editorconfig b/vendor/github.com/gorilla/schema/.editorconfig
new file mode 100644
index 000000000..c6b74c3e0
--- /dev/null
+++ b/vendor/github.com/gorilla/schema/.editorconfig
@@ -0,0 +1,20 @@
+; https://editorconfig.org/
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 2
+
+[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
+indent_style = tab
+indent_size = 4
+
+[*.md]
+indent_size = 4
+trim_trailing_whitespace = false
+
+eclint_indent_style = unset
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/schema/.gitignore b/vendor/github.com/gorilla/schema/.gitignore
new file mode 100644
index 000000000..84039fec6
--- /dev/null
+++ b/vendor/github.com/gorilla/schema/.gitignore
@@ -0,0 +1 @@
+coverage.coverprofile
diff --git a/vendor/github.com/gorilla/schema/LICENSE b/vendor/github.com/gorilla/schema/LICENSE
index 0e5fb8728..bb9d80bc9 100644
--- a/vendor/github.com/gorilla/schema/LICENSE
+++ b/vendor/github.com/gorilla/schema/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gorilla/schema/Makefile b/vendor/github.com/gorilla/schema/Makefile new file mode 100644 index 000000000..98f5ab75f --- /dev/null +++ b/vendor/github.com/gorilla/schema/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... \ No newline at end of file diff --git a/vendor/github.com/gorilla/schema/README.md b/vendor/github.com/gorilla/schema/README.md index aefdd6699..dbeff3d0c 100644 --- a/vendor/github.com/gorilla/schema/README.md +++ b/vendor/github.com/gorilla/schema/README.md @@ -1,8 +1,12 @@ -schema -====== -[![GoDoc](https://godoc.org/github.com/gorilla/schema?status.svg)](https://godoc.org/github.com/gorilla/schema) [![Build Status](https://travis-ci.org/gorilla/schema.png?branch=master)](https://travis-ci.org/gorilla/schema) -[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/schema/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/schema?badge) +# gorilla/schema +![testing](https://github.com/gorilla/schema/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/schema/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/schema) +[![godoc](https://godoc.org/github.com/gorilla/schema?status.svg)](https://godoc.org/github.com/gorilla/schema) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/schema/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/schema?badge) + + +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) Package gorilla/schema converts structs to and from form values. diff --git a/vendor/github.com/gorilla/schema/cache.go b/vendor/github.com/gorilla/schema/cache.go index 0746c1202..bf21697cf 100644 --- a/vendor/github.com/gorilla/schema/cache.go +++ b/vendor/github.com/gorilla/schema/cache.go @@ -12,7 +12,7 @@ import ( "sync" ) -var invalidPath = errors.New("schema: invalid path") +var errInvalidPath = errors.New("schema: invalid path") // newCache returns a new cache. func newCache() *cache { @@ -53,13 +53,13 @@ func (c *cache) parsePath(p string, t reflect.Type) ([]pathPart, error) { keys := strings.Split(p, ".") for i := 0; i < len(keys); i++ { if t.Kind() != reflect.Struct { - return nil, invalidPath + return nil, errInvalidPath } if struc = c.get(t); struc == nil { - return nil, invalidPath + return nil, errInvalidPath } if field = struc.get(keys[i]); field == nil { - return nil, invalidPath + return nil, errInvalidPath } // Valid field. 
Append index. path = append(path, field.name) @@ -72,10 +72,10 @@ func (c *cache) parsePath(p string, t reflect.Type) ([]pathPart, error) { // So checking i+2 is not necessary anymore. i++ if i+1 > len(keys) { - return nil, invalidPath + return nil, errInvalidPath } if index64, err = strconv.ParseInt(keys[i], 10, 0); err != nil { - return nil, invalidPath + return nil, errInvalidPath } parts = append(parts, pathPart{ path: path, diff --git a/vendor/github.com/gorilla/schema/decoder.go b/vendor/github.com/gorilla/schema/decoder.go index 025e438b5..28b560bbb 100644 --- a/vendor/github.com/gorilla/schema/decoder.go +++ b/vendor/github.com/gorilla/schema/decoder.go @@ -193,7 +193,7 @@ func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values if v.Type().Kind() == reflect.Struct { for i := 0; i < v.NumField(); i++ { field := v.Field(i) - if field.Type().Kind() == reflect.Ptr && field.IsNil() && v.Type().Field(i).Anonymous == true { + if field.Type().Kind() == reflect.Ptr && field.IsNil() && v.Type().Field(i).Anonymous { field.Set(reflect.New(field.Type().Elem())) } } diff --git a/vendor/github.com/gorilla/schema/encoder.go b/vendor/github.com/gorilla/schema/encoder.go index f0ed63121..51f0a78ca 100644 --- a/vendor/github.com/gorilla/schema/encoder.go +++ b/vendor/github.com/gorilla/schema/encoder.go @@ -3,6 +3,7 @@ package schema import ( "errors" "fmt" + "log" "reflect" "strconv" ) @@ -93,8 +94,11 @@ func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error { } // Encode struct pointer types if the field is a valid pointer and a struct. - if isValidStructPointer(v.Field(i)) { - e.encode(v.Field(i).Elem(), dst) + if isValidStructPointer(v.Field(i)) && !e.hasCustomEncoder(v.Field(i).Type()) { + err := e.encode(v.Field(i).Elem(), dst) + if err != nil { + log.Fatal(err) + } continue } @@ -112,7 +116,10 @@ func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error { } if v.Field(i).Type().Kind() == reflect.Struct { - e.encode(v.Field(i), dst) + err := e.encode(v.Field(i), dst) + if err != nil { + log.Fatal(err) + } continue } @@ -142,6 +149,11 @@ func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error { return nil } +func (e *Encoder) hasCustomEncoder(t reflect.Type) bool { + _, exists := e.regenc[t] + return exists +} + func typeEncoder(t reflect.Type, reg map[reflect.Type]encoderFunc) encoderFunc { if f, ok := reg[t]; ok { return f diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 4c28dff46..a22953805 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,6 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@v0.10.1 builds: - @@ -32,7 +31,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble - id: "s2d" binary: s2d @@ -59,7 +57,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble - id: "s2sx" binary: s2sx @@ -87,7 +84,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble archives: - diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 7e83f583c..1f72cdde1 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,30 @@ This package provides various compression algorithms. 
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index 4c28dff46..a22953805 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -3,7 +3,6 @@
 before:
   hooks:
     - ./gen.sh
-    - go install mvdan.cc/garble@v0.10.1
 
 builds:
   -
@@ -32,7 +31,6 @@ builds:
       - mips64le
     goarm:
       - 7
-    gobinary: garble
   -
     id: "s2d"
     binary: s2d
@@ -59,7 +57,6 @@ builds:
      - mips64le
    goarm:
      - 7
-    gobinary: garble
  -
    id: "s2sx"
    binary: s2sx
@@ -87,7 +84,6 @@ builds:
      - mips64le
    goarm:
      - 7
-    gobinary: garble
 
 archives:
   -
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 7e83f583c..1f72cdde1 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,30 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
+  * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
+  * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
+
+* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5)
+  * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912
+  * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908
+  * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913
+  * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910
+  * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917
+https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918
+
+* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4)
+  * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887
+  * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886
+  * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892
+  * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890
+  * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891
+
+* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3)
+  * fse: Fix max header size https://github.com/klauspost/compress/pull/881
+  * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877
+  * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883
+
 * Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
   * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
@@ -554,7 +578,7 @@ For direct deflate use, NewStatelessWriter and StatelessDeflate are available. S
 
 A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
 
-```
+```go
 	// replace 'ioutil.Discard' with your output.
 	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
 	if err != nil {
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index de912e187..66d1657d2 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -212,7 +212,7 @@ func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
 // Should only be used after a start/reset.
 func (d *compressor) fillWindow(b []byte) {
 	// Do not fill window if we are in store-only or huffman mode.
-	if d.level <= 0 {
+	if d.level <= 0 && d.level > -MinCustomWindowSize {
 		return
 	}
 	if d.fast != nil {
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
deleted file mode 100644
index 4dcab8d23..000000000
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package huff0
-
-// byteReader provides a byte reader that reads
-// little endian values from a byte stream.
-// The input stream is manually advanced.
-// The reader performs no bounds checks.
-type byteReader struct {
-	b   []byte
-	off int
-}
-
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
-	b.b = in
-	b.off = 0
-}
-
-// Int32 returns a little endian int32 starting at current offset.
-func (b byteReader) Int32() int32 {
-	v3 := int32(b.b[b.off+3])
-	v2 := int32(b.b[b.off+2])
-	v1 := int32(b.b[b.off+1])
-	v0 := int32(b.b[b.off])
-	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// Uint32 returns a little endian uint32 starting at current offset.
-func (b byteReader) Uint32() uint32 {
-	v3 := uint32(b.b[b.off+3])
-	v2 := uint32(b.b[b.off+2])
-	v1 := uint32(b.b[b.off+1])
-	v0 := uint32(b.b[b.off])
-	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// remain will return the number of bytes remaining.
-func (b byteReader) remain() int {
-	return len(b.b) - b.off
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 518436cf3..84aa3d12f 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -350,6 +350,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 // Does not update s.clearCount.
 func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
 	reuse = true
+	_ = s.count // Assert that s != nil to speed up the following loop.
 	for _, v := range in {
 		s.count[v]++
 	}
@@ -415,7 +416,7 @@ func (s *Scratch) validateTable(c cTable) bool {
 
 // minTableLog provides the minimum logSize to safely represent a distribution.
 func (s *Scratch) minTableLog() uint8 {
-	minBitsSrc := highBit32(uint32(s.br.remain())) + 1
+	minBitsSrc := highBit32(uint32(s.srcLen)) + 1
 	minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
 	if minBitsSrc < minBitsSymbols {
 		return uint8(minBitsSrc)
@@ -427,7 +428,7 @@ func (s *Scratch) minTableLog() uint8 {
 func (s *Scratch) optimalTableLog() {
 	tableLog := s.TableLog
 	minBits := s.minTableLog()
-	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
+	maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
 	if maxBitsSrc < tableLog {
 		// Accuracy can be reduced
 		tableLog = maxBitsSrc
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index e8ad17ad0..77ecd68e0 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -88,7 +88,7 @@ type Scratch struct {
 	// Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
 	MaxDecodedSize int
 
-	br byteReader
+	srcLen int
 
 	// MaxSymbolValue will override the maximum symbol value of the next block.
 	MaxSymbolValue uint8
@@ -170,7 +170,7 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
 	if s.fse == nil {
 		s.fse = &fse.Scratch{}
 	}
-	s.br.init(in)
+	s.srcLen = len(in)
 	return s, nil
 }
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
index 2263853fc..5a4412f90 100644
--- a/vendor/github.com/klauspost/compress/s2sx.mod
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -1,4 +1,4 @@
 module github.com/klauspost/compress
 
-go 1.16
+go 1.19
 
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index bdd49c8b2..92e2347bb 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -259,7 +259,7 @@ nyc-taxi-data-10M.csv   gzkp    1   3325605752  922273214   13929   227.68
 
 ## Decompressor
 
-Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
 
 This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
index f6a240970..6a5a2988b 100644
--- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -95,42 +95,54 @@ type Header struct {
 // If there isn't enough input, io.ErrUnexpectedEOF is returned.
 // The FirstBlock.OK will indicate if enough information was available to decode the first block header.
 func (h *Header) Decode(in []byte) error {
+	_, err := h.DecodeAndStrip(in)
+	return err
+}
+
+// DecodeAndStrip will decode the header from the beginning of the stream
+// and on success return the remaining bytes.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) {
 	*h = Header{}
 	if len(in) < 4 {
-		return io.ErrUnexpectedEOF
+		return nil, io.ErrUnexpectedEOF
 	}
 	h.HeaderSize += 4
 	b, in := in[:4], in[4:]
 	if string(b) != frameMagic {
 		if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
-			return ErrMagicMismatch
+			return nil, ErrMagicMismatch
 		}
 		if len(in) < 4 {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		h.HeaderSize += 4
 		h.Skippable = true
 		h.SkippableID = int(b[0] & 0xf)
 		h.SkippableSize = binary.LittleEndian.Uint32(in)
-		return nil
+		return in[4:], nil
 	}
 
 	// Read Window_Descriptor
 	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
 	if len(in) < 1 {
-		return io.ErrUnexpectedEOF
+		return nil, io.ErrUnexpectedEOF
 	}
 	fhd, in := in[0], in[1:]
 	h.HeaderSize++
 	h.SingleSegment = fhd&(1<<5) != 0
 	h.HasCheckSum = fhd&(1<<2) != 0
 	if fhd&(1<<3) != 0 {
-		return errors.New("reserved bit set on frame header")
+		return nil, errors.New("reserved bit set on frame header")
 	}
 
 	if !h.SingleSegment {
 		if len(in) < 1 {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		var wd byte
 		wd, in = in[0], in[1:]
@@ -148,7 +160,7 @@ func (h *Header) Decode(in []byte) error {
 			size = 4
 		}
 		if len(in) < int(size) {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		b, in = in[:size], in[size:]
 		h.HeaderSize += int(size)
@@ -178,7 +190,7 @@ func (h *Header) Decode(in []byte) error {
 	if fcsSize > 0 {
 		h.HasFCS = true
 		if len(in) < fcsSize {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		b, in = in[:fcsSize], in[fcsSize:]
 		h.HeaderSize += int(fcsSize)
@@ -199,7 +211,7 @@ func (h *Header) Decode(in []byte) error {
 
 	// Frame Header done, we will not fail from now on.
 	if len(in) < 3 {
-		return nil
+		return in, nil
 	}
 	tmp := in[:3]
 	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
@@ -209,7 +221,7 @@ func (h *Header) Decode(in []byte) error {
 	cSize := int(bh >> 3)
 	switch blockType {
 	case blockTypeReserved:
-		return nil
+		return in, nil
 	case blockTypeRLE:
 		h.FirstBlock.Compressed = true
 		h.FirstBlock.DecompressedSize = cSize
@@ -225,5 +237,25 @@ func (h *Header) Decode(in []byte) error {
 	}
 
 	h.FirstBlock.OK = true
-	return nil
+	return in, nil
+}
+
+// AppendTo will append the encoded header to the dst slice.
+// There is no error checking performed on the header values.
+func (h *Header) AppendTo(dst []byte) ([]byte, error) {
+	if h.Skippable {
+		magic := [4]byte{0x50, 0x2a, 0x4d, 0x18}
+		magic[0] |= byte(h.SkippableID & 0xf)
+		dst = append(dst, magic[:]...)
+		f := h.SkippableSize
+		return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil
+	}
+	f := frameHeader{
+		ContentSize:   h.FrameContentSize,
+		WindowSize:    uint32(h.WindowSize),
+		SingleSegment: h.SingleSegment,
+		Checksum:      h.HasCheckSum,
+		DictID:        h.DictionaryID,
+	}
+	return f.appendTo(dst), nil
 }
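Reviewer note: `Header.DecodeAndStrip` and `Header.AppendTo` above are the new public API from the v1.17.5 changelog entry "zstd: Add Frame header encoding and stripping". A short usage sketch under the assumption that `raw` holds one complete zstd frame, which `EncodeAll` guarantees here:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Compress something so we have a real frame to inspect.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	raw := enc.EncodeAll([]byte("hello zstd header"), nil)
	_ = enc.Close()

	// Decode the frame header and get back whatever follows it.
	var h zstd.Header
	remain, err := h.DecodeAndStrip(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println("header bytes:", h.HeaderSize, "payload bytes:", len(remain))

	// AppendTo re-encodes the parsed header fields; prepending the
	// result to remain should yield an equivalent frame, though not
	// necessarily a byte-identical one.
	rebuilt, err := h.AppendTo(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("rebuilt header:", len(rebuilt), "bytes")
}
```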
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index c81a15357..87f42879a 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -201,14 +201,6 @@ encodeLoop:
 			if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
 				return
 			}
-			if debugAsserts {
-				if offset >= s {
-					panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
-				}
-				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
-					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
-				}
-			}
 			// Try to quick reject if we already have a long match.
 			if m.length > 16 {
 				left := len(src) - int(m.s+m.length)
@@ -227,8 +219,10 @@ encodeLoop:
 				}
 			}
 			l := 4 + e.matchlen(s+4, offset+4, src)
-			if true {
+			if m.rep <= 0 {
 				// Extend candidate match backwards as far as possible.
+				// Do not extend repeats as we can assume they are optimal
+				// and offsets change if s == nextEmit.
 				tMin := s - e.maxMatchOff
 				if tMin < 0 {
 					tMin = 0
@@ -239,7 +233,14 @@ encodeLoop:
 					l++
 				}
 			}
-
+			if debugAsserts {
+				if offset >= s {
+					panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
+				}
+				if !bytes.Equal(src[s:s+l], src[offset:offset+l]) {
+					panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+				}
+			}
 			cand := match{offset: offset, s: s, length: l, rep: rep}
 			cand.estBits(bitsPerByte)
 			if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
@@ -336,24 +337,31 @@ encodeLoop:
 		}
 
 		if debugAsserts {
+			if best.offset >= best.s {
+				panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s))
+			}
+			if best.s < nextEmit {
+				panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit))
+			}
+			if best.offset < s-e.maxMatchOff {
+				panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff))
+			}
 			if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
 				panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
 			}
 		}
 
 		// We have a match, we can store the forward value
+		s = best.s
 		if best.rep > 0 {
 			var seq seq
 			seq.matchLen = uint32(best.length - zstdMinMatch)
-			if debugAsserts && s < nextEmit {
-				panic("s < nextEmit")
-			}
 			addLiterals(&seq, best.s)
 
 			// Repeat. If bit 4 is set, this is a non-lit repeat.
 			seq.offset = uint32(best.rep & 3)
 			if debugSequences {
-				println("repeat sequence", seq, "next s:", s)
+				println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset)
 			}
 			blk.sequences = append(blk.sequences, seq)
@@ -396,7 +404,6 @@ encodeLoop:
 
 		// A 4-byte match has been found. Update recent offsets.
 		// We'll later see if more than 4 bytes.
-		s = best.s
 		t := best.offset
 		offset1, offset2, offset3 = s-t, offset1, offset2
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index faaf81921..20671dcb9 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -94,7 +94,7 @@ func WithEncoderConcurrency(n int) EOption {
 // The value must be a power of two between MinWindowSize and MaxWindowSize.
 // A larger value will enable better compression but allocate more memory and,
 // for above-default values, take considerably longer.
-// The default value is determined by the compression level.
+// The default value is determined by the compression level and max 8MB.
 func WithWindowSize(n int) EOption {
 	return func(o *encoderOptions) error {
 		switch {
@@ -232,9 +232,9 @@ func WithEncoderLevel(l EncoderLevel) EOption {
 			case SpeedDefault:
 				o.windowSize = 8 << 20
 			case SpeedBetterCompression:
-				o.windowSize = 16 << 20
+				o.windowSize = 8 << 20
 			case SpeedBestCompression:
-				o.windowSize = 32 << 20
+				o.windowSize = 8 << 20
 			}
 		}
 		if !o.customALEntropy {
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
index 2f5d5ed45..667ca0679 100644
--- a/vendor/github.com/klauspost/compress/zstd/frameenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -76,7 +76,7 @@ func (f frameHeader) appendTo(dst []byte) []byte {
 		if f.SingleSegment {
 			dst = append(dst, uint8(f.ContentSize))
 		}
-		// Unless SingleSegment is set, framessizes < 256 are nto stored.
+		// Unless SingleSegment is set, framessizes < 256 are not stored.
 	case 1:
 		f.ContentSize -= 256
 		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
index 332e51fe4..8adfebb02 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -20,10 +20,9 @@ func (s *fseDecoder) buildDtable() error {
 			if v == -1 {
 				s.dt[highThreshold].setAddBits(uint8(i))
 				highThreshold--
-				symbolNext[i] = 1
-			} else {
-				symbolNext[i] = uint16(v)
+				v = 1
 			}
+			symbolNext[i] = uint16(v)
 		}
 	}
 
@@ -35,10 +34,12 @@ func (s *fseDecoder) buildDtable() error {
 		for ss, v := range s.norm[:s.symbolLen] {
 			for i := 0; i < int(v); i++ {
 				s.dt[position].setAddBits(uint8(ss))
-				position = (position + step) & tableMask
-				for position > highThreshold {
+				for {
 					// lowprob area
 					position = (position + step) & tableMask
+					if position <= highThreshold {
+						break
+					}
 				}
 			}
 		}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 974b99725..5b06174b8 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -157,8 +157,7 @@ sequenceDecs_decode_amd64_ll_update_zero:
 
 	// Update Literal Length State
 	MOVBQZX DI, R14
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -177,8 +176,7 @@ sequenceDecs_decode_amd64_ll_update_zero:
 
 	// Update Match Length State
 	MOVBQZX R8, R14
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -197,8 +195,7 @@ sequenceDecs_decode_amd64_ll_update_zero:
 
 	// Update Offset State
 	MOVBQZX R9, R14
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -459,8 +456,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero:
 
 	// Update Literal Length State
 	MOVBQZX DI, R14
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -479,8 +475,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero:
 
 	// Update Match Length State
 	MOVBQZX R8, R14
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -499,8 +494,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero:
 
 	// Update Offset State
 	MOVBQZX R9, R14
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -772,11 +766,10 @@ sequenceDecs_decode_bmi2_fill_2_end:
 	BZHIQ R14, R15, R15
 
 	// Update Offset State
-	BZHIQ  R8, R15, CX
-	SHRXQ  R8, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R15, CX
+	SHRXQ R8, R15, R15
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -784,11 +777,10 @@ sequenceDecs_decode_bmi2_fill_2_end:
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R15, CX
-	SHRXQ  DI, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R15, CX
+	SHRXQ DI, R15, R15
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -796,10 +788,9 @@ sequenceDecs_decode_bmi2_fill_2_end:
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R15, CX
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R15, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -1032,11 +1023,10 @@ sequenceDecs_decode_56_bmi2_fill_end:
 	BZHIQ R14, R15, R15
 
 	// Update Offset State
-	BZHIQ  R8, R15, CX
-	SHRXQ  R8, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R15, CX
+	SHRXQ R8, R15, R15
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -1044,11 +1034,10 @@ sequenceDecs_decode_56_bmi2_fill_end:
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R15, CX
-	SHRXQ  DI, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R15, CX
+	SHRXQ DI, R15, R15
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -1056,10 +1045,9 @@ sequenceDecs_decode_56_bmi2_fill_end:
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R15, CX
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R15, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -1967,8 +1955,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero:
 
 	// Update Literal Length State
 	MOVBQZX DI, R13
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -1987,8 +1974,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero:
 
 	// Update Match Length State
 	MOVBQZX R8, R13
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -2007,8 +1993,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero:
 
 	// Update Offset State
 	MOVBQZX R9, R13
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -2514,11 +2499,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
 	BZHIQ R13, R14, R14
 
 	// Update Offset State
-	BZHIQ  R8, R14, CX
-	SHRXQ  R8, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R14, CX
+	SHRXQ R8, R14, R14
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -2526,11 +2510,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R14, CX
-	SHRXQ  DI, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R14, CX
+	SHRXQ DI, R14, R14
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -2538,10 +2521,9 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R14, CX
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R14, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -3055,8 +3037,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
 
 	// Update Literal Length State
 	MOVBQZX DI, R13
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -3075,8 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
 
 	// Update Match Length State
 	MOVBQZX R8, R13
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -3095,8 +3075,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
 
 	// Update Offset State
 	MOVBQZX R9, R13
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -3704,11 +3683,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
 	BZHIQ R13, R14, R14
 
 	// Update Offset State
-	BZHIQ  R8, R14, CX
-	SHRXQ  R8, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R14, CX
+	SHRXQ R8, R14, R14
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -3716,11 +3694,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R14, CX
-	SHRXQ  DI, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R14, CX
+	SHRXQ DI, R14, R14
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -3728,10 +3705,9 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R14, CX
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R14, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
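Reviewer note: every hunk in the assembly diff above replaces a two-instruction "extract bits 16..31" sequence (SHRQ plus MOVWQZX in the scalar paths, MOVQ plus BEXTRQ in the BMI2 paths) with a single SHRL. A sketch of the equivalence in Go, assuming, as the change implies, that the packed state word carries nothing above bit 31:

```go
package main

import "fmt"

func main() {
	// state stands in for a packed FSE table entry; the field being
	// extracted lives in bits 16..31.
	var state uint64 = 0x00c0ffee

	oldWay := (state >> 16) & 0xffff      // SHRQ then MOVWQZX
	newWay := uint64(uint32(state) >> 16) // single SHRL

	// A 32-bit shift implicitly zeroes the upper half of the register,
	// and bits 16..31 of a 32-bit value fit in 16 bits anyway.
	fmt.Println(oldWay == newWay, newWay) // true 192
}
```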
diff --git a/vendor/github.com/letsencrypt/boulder/core/objects.go b/vendor/github.com/letsencrypt/boulder/core/objects.go
index 2afbfbe91..b52f0f5e0 100644
--- a/vendor/github.com/letsencrypt/boulder/core/objects.go
+++ b/vendor/github.com/letsencrypt/boulder/core/objects.go
@@ -75,11 +75,16 @@ type OCSPStatus string
 const (
 	OCSPStatusGood    = OCSPStatus("good")
 	OCSPStatusRevoked = OCSPStatus("revoked")
+	// Not a real OCSP status. This is a placeholder we write before the
+	// actual precertificate is issued, to ensure we never return "good" before
+	// issuance succeeds, for BR compliance reasons.
+	OCSPStatusNotReady = OCSPStatus("wait")
 )
 
 var OCSPStatusToInt = map[OCSPStatus]int{
-	OCSPStatusGood:    ocsp.Good,
-	OCSPStatusRevoked: ocsp.Revoked,
+	OCSPStatusGood:     ocsp.Good,
+	OCSPStatusRevoked:  ocsp.Revoked,
+	OCSPStatusNotReady: -1,
 }
 
 // DNSPrefix is attached to DNS names in DNS challenges
@@ -120,7 +125,7 @@ type ValidationRecord struct {
 	URL string `json:"url,omitempty"`
 
 	// Shared
-	Hostname          string   `json:"hostname"`
+	Hostname          string   `json:"hostname,omitempty"`
 	Port              string   `json:"port,omitempty"`
 	AddressesResolved []net.IP `json:"addressesResolved,omitempty"`
 	AddressUsed       net.IP   `json:"addressUsed,omitempty"`
@@ -337,11 +342,18 @@ type Authorization struct {
 	// slice and the order of these challenges may not be predictable.
 	Challenges []Challenge `json:"challenges,omitempty" db:"-"`
 
-	// Wildcard is a Boulder-specific Authorization field that indicates the
-	// authorization was created as a result of an order containing a name with
-	// a `*.`wildcard prefix. This will help convey to users that an
-	// Authorization with the identifier `example.com` and one DNS-01 challenge
-	// corresponds to a name `*.example.com` from an associated order.
+	// https://datatracker.ietf.org/doc/html/rfc8555#page-29
+	//
+	// wildcard (optional, boolean): This field MUST be present and true
+	//   for authorizations created as a result of a newOrder request
+	//   containing a DNS identifier with a value that was a wildcard
+	//   domain name. For other authorizations, it MUST be absent.
+	//   Wildcard domain names are described in Section 7.1.3.
+	//
+	// This is not represented in the database because we calculate it from
+	// the identifier stored in the database. Unlike the identifier returned
+	// as part of the authorization, the identifier we store in the database
+	// can contain an asterisk.
 	Wildcard bool `json:"wildcard,omitempty" db:"-"`
 }
 
@@ -406,53 +418,46 @@ type Certificate struct {
 }
 
 // CertificateStatus structs are internal to the server. They represent the
-// latest data about the status of the certificate, required for OCSP updating
-// and for validating that the subscriber has accepted the certificate.
+// latest data about the status of the certificate, required for generating new
+// OCSP responses and determining if a certificate has been revoked.
 type CertificateStatus struct {
 	ID int64 `db:"id"`
 
 	Serial string `db:"serial"`
 
 	// status: 'good' or 'revoked'. Note that good, expired certificates remain
-	// with status 'good' but don't necessarily get fresh OCSP responses.
+	// with status 'good' but don't necessarily get fresh OCSP responses.
 	Status OCSPStatus `db:"status"`
 
 	// ocspLastUpdated: The date and time of the last time we generated an OCSP
-	// response. If we have never generated one, this has the zero value of
-	// time.Time, i.e. Jan 1 1970.
+	// response. If we have never generated one, this has the zero value of
+	// time.Time, i.e. Jan 1 1970.
 	OCSPLastUpdated time.Time `db:"ocspLastUpdated"`
 
 	// revokedDate: If status is 'revoked', this is the date and time it was
-	// revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970.
+	// revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970.
 	RevokedDate time.Time `db:"revokedDate"`
 
 	// revokedReason: If status is 'revoked', this is the reason code for the
-	// revocation. Otherwise it is zero (which happens to be the reason
-	// code for 'unspecified').
+	// revocation. Otherwise it is zero (which happens to be the reason
+	// code for 'unspecified').
 	RevokedReason revocation.Reason `db:"revokedReason"`
 
 	LastExpirationNagSent time.Time `db:"lastExpirationNagSent"`
 
-	// The encoded and signed OCSP response.
-	OCSPResponse []byte `db:"ocspResponse"`
-
-	// For performance reasons[0] we duplicate the `Expires` field of the
-	// `Certificates` object/table in `CertificateStatus` to avoid a costly `JOIN`
-	// later on just to retrieve this `Time` value. This helps both the OCSP
-	// updater and the expiration-mailer stay performant.
-	//
-	// Similarly, we add an explicit `IsExpired` boolean to `CertificateStatus`
-	// table that the OCSP updater so that the database can create a meaningful
-	// index on `(isExpired, ocspLastUpdated)` without a `JOIN` on `certificates`.
-	// For more detail see Boulder #1864[0].
-	//
-	// [0]: https://github.com/letsencrypt/boulder/issues/1864
+	// NotAfter and IsExpired are convenience columns which allow expensive
+	// queries to quickly filter out certificates that we don't need to care about
+	// anymore. These are particularly useful for the expiration mailer and CRL
+	// updater. See https://github.com/letsencrypt/boulder/issues/1864.
 	NotAfter  time.Time `db:"notAfter"`
 	IsExpired bool      `db:"isExpired"`
 
-	// TODO(#5152): Change this to an issuance.Issuer(Name)ID after it no longer
-	// has to support both IssuerNameIDs and IssuerIDs.
-	IssuerID int64
+	// Note: this is not an issuance.IssuerNameID because that would create an
+	// import cycle between core and issuance.
+	// Note2: This field used to be called `issuerID`. We keep the old name in
+	// the DB, but update the Go field name to be clear which type of ID this
+	// is.
+	IssuerNameID int64 `db:"issuerID"`
 }
 
 // FQDNSet contains the SHA256 hash of the lowercased, comma joined dNSNames
@@ -501,7 +506,7 @@ func RenewalInfoSimple(issued time.Time, expires time.Time) RenewalInfo {
 }
 
 // RenewalInfoImmediate constructs a `RenewalInfo` object with a suggested
-// window in the past. Per the draft-ietf-acme-ari-00 spec, clients should
+// window in the past. Per the draft-ietf-acme-ari-01 spec, clients should
 // attempt to renew immediately if the suggested window is in the past. The
 // passed `now` is assumed to be a timestamp representing the current moment in
 // time.
diff --git a/vendor/github.com/letsencrypt/boulder/core/util.go b/vendor/github.com/letsencrypt/boulder/core/util.go
index 6949e4563..d7fe02668 100644
--- a/vendor/github.com/letsencrypt/boulder/core/util.go
+++ b/vendor/github.com/letsencrypt/boulder/core/util.go
@@ -1,9 +1,10 @@
 package core
 
 import (
-	"bytes"
 	"crypto"
+	"crypto/ecdsa"
 	"crypto/rand"
+	"crypto/rsa"
 	"crypto/sha256"
 	"crypto/x509"
 	"encoding/base64"
@@ -16,6 +17,7 @@ import (
 	"math/big"
 	mrand "math/rand"
 	"os"
+	"path"
 	"reflect"
 	"regexp"
 	"sort"
@@ -23,7 +25,7 @@ import (
 	"time"
 	"unicode"
 
-	jose "gopkg.in/go-jose/go-jose.v2"
+	"gopkg.in/go-jose/go-jose.v2"
 )
 
 const Unspecified = "Unspecified"
@@ -96,7 +98,7 @@ func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) {
 	switch t := key.(type) {
 	case *jose.JSONWebKey:
 		if t == nil {
-			return Sha256Digest{}, fmt.Errorf("Cannot compute digest of nil key")
+			return Sha256Digest{}, errors.New("cannot compute digest of nil key")
 		}
 		return KeyDigest(t.Key)
 	case jose.JSONWebKey:
@@ -132,21 +134,16 @@ func KeyDigestEquals(j, k crypto.PublicKey) bool {
 	return digestJ == digestK
 }
 
-// PublicKeysEqual determines whether two public keys have the same marshalled
-// bytes as one another
-func PublicKeysEqual(a, b interface{}) (bool, error) {
-	if a == nil || b == nil {
-		return false, errors.New("One or more nil arguments to PublicKeysEqual")
-	}
-	aBytes, err := x509.MarshalPKIXPublicKey(a)
-	if err != nil {
-		return false, err
-	}
-	bBytes, err := x509.MarshalPKIXPublicKey(b)
-	if err != nil {
-		return false, err
+// PublicKeysEqual determines whether two public keys are identical.
+func PublicKeysEqual(a, b crypto.PublicKey) (bool, error) {
+	switch ak := a.(type) {
+	case *rsa.PublicKey:
+		return ak.Equal(b), nil
+	case *ecdsa.PublicKey:
+		return ak.Equal(b), nil
+	default:
+		return false, fmt.Errorf("unsupported public key type %T", ak)
 	}
-	return bytes.Equal(aBytes, bBytes), nil
 }
 
 // SerialToString converts a certificate serial number (big.Int) to a String
@@ -160,7 +157,7 @@ func SerialToString(serial *big.Int) string {
 func StringToSerial(serial string) (*big.Int, error) {
 	var serialNum big.Int
 	if !ValidSerial(serial) {
-		return &serialNum, errors.New("Invalid serial number")
+		return &serialNum, fmt.Errorf("invalid serial number %q", serial)
 	}
 	_, err := fmt.Sscanf(serial, "%036x", &serialNum)
 	return &serialNum, err
@@ -245,6 +242,14 @@ func UniqueLowerNames(names []string) (unique []string) {
 	return
 }
 
+// HashNames returns a hash of the names requested. This is intended for use
+// when interacting with the orderFqdnSets table and rate limiting.
+func HashNames(names []string) []byte {
+	names = UniqueLowerNames(names)
+	hash := sha256.Sum256([]byte(strings.Join(names, ",")))
+	return hash[:]
+}
+
 // LoadCert loads a PEM certificate specified by filename or returns an error
 func LoadCert(filename string) (*x509.Certificate, error) {
 	certPEM, err := os.ReadFile(filename)
@@ -253,7 +258,7 @@ func LoadCert(filename string) (*x509.Certificate, error) {
 	}
 	block, _ := pem.Decode(certPEM)
 	if block == nil {
-		return nil, fmt.Errorf("No data in cert PEM file %s", filename)
+		return nil, fmt.Errorf("no data in cert PEM file %q", filename)
 	}
 	cert, err := x509.ParseCertificate(block.Bytes)
 	if err != nil {
@@ -298,3 +303,7 @@ func IsASCII(str string) bool {
 	}
 	return true
 }
+
+func Command() string {
+	return path.Base(os.Args[0])
+}
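Reviewer note: the `HashNames` helper added above is small enough to illustrate standalone. A self-contained sketch of its documented behavior (lowercase, deduplicate, sort, join with commas, SHA-256), assuming `UniqueLowerNames` sorts its output, as its use for stable hashing implies:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"sort"
	"strings"
)

// hashNames mirrors the behavior documented for core.HashNames.
func hashNames(names []string) []byte {
	seen := make(map[string]bool)
	unique := []string{}
	for _, n := range names {
		n = strings.ToLower(n)
		if !seen[n] {
			seen[n] = true
			unique = append(unique, n)
		}
	}
	sort.Strings(unique)
	h := sha256.Sum256([]byte(strings.Join(unique, ",")))
	return h[:]
}

func main() {
	// Case and ordering differences must not change the hash.
	a := hashNames([]string{"Example.COM", "www.example.com"})
	b := hashNames([]string{"www.example.com", "example.com"})
	fmt.Printf("equal inputs, equal hash: %t\n", bytes.Equal(a, b))
}
```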
diff --git a/vendor/github.com/letsencrypt/boulder/errors/errors.go b/vendor/github.com/letsencrypt/boulder/errors/errors.go
deleted file mode 100644
index 50be1087a..000000000
--- a/vendor/github.com/letsencrypt/boulder/errors/errors.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Package errors provides internal-facing error types for use in Boulder. Many
-// of these are transformed directly into Problem Details documents by the WFE.
-// Some, like NotFound, may be handled internally. We avoid using Problem
-// Details documents as part of our internal error system to avoid layering
-// confusions.
-//
-// These errors are specifically for use in errors that cross RPC boundaries.
-// An error type that does not need to be passed through an RPC can use a plain
-// Go type locally. Our gRPC code is aware of these error types and will
-// serialize and deserialize them automatically.
-package errors
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/letsencrypt/boulder/identifier"
-)
-
-// ErrorType provides a coarse category for BoulderErrors.
-// Objects of type ErrorType should never be directly returned by other
-// functions; instead use the methods below to create an appropriate
-// BoulderError wrapping one of these types.
-type ErrorType int
-
-// These numeric constants are used when sending berrors through gRPC.
-const (
-	// InternalServer is deprecated. Instead, pass a plain Go error. That will get
-	// turned into a probs.InternalServerError by the WFE.
-	InternalServer ErrorType = iota
-	_
-	Malformed
-	Unauthorized
-	NotFound
-	RateLimit
-	RejectedIdentifier
-	InvalidEmail
-	ConnectionFailure
-	_ // Reserved, previously WrongAuthorizationState
-	CAA
-	MissingSCTs
-	Duplicate
-	OrderNotReady
-	DNS
-	BadPublicKey
-	BadCSR
-	AlreadyRevoked
-	BadRevocationReason
-)
-
-func (ErrorType) Error() string {
-	return "urn:ietf:params:acme:error"
-}
-
-// BoulderError represents internal Boulder errors
-type BoulderError struct {
-	Type      ErrorType
-	Detail    string
-	SubErrors []SubBoulderError
-
-	// RetryAfter the duration a client should wait before retrying the request
-	// which resulted in this error.
-	RetryAfter time.Duration
-}
-
-// SubBoulderError represents sub-errors specific to an identifier that are
-// related to a top-level internal Boulder error.
-type SubBoulderError struct {
-	*BoulderError
-	Identifier identifier.ACMEIdentifier
-}
-
-func (be *BoulderError) Error() string {
-	return be.Detail
-}
-
-func (be *BoulderError) Unwrap() error {
-	return be.Type
-}
-
-// WithSubErrors returns a new BoulderError instance created by adding the
-// provided subErrs to the existing BoulderError.
-func (be *BoulderError) WithSubErrors(subErrs []SubBoulderError) *BoulderError {
-	return &BoulderError{
-		Type:       be.Type,
-		Detail:     be.Detail,
-		SubErrors:  append(be.SubErrors, subErrs...),
-		RetryAfter: be.RetryAfter,
-	}
-}
-
-// New is a convenience function for creating a new BoulderError
-func New(errType ErrorType, msg string, args ...interface{}) error {
-	return &BoulderError{
-		Type:   errType,
-		Detail: fmt.Sprintf(msg, args...),
-	}
-}
-
-func InternalServerError(msg string, args ...interface{}) error {
-	return New(InternalServer, msg, args...)
-}
-
-func MalformedError(msg string, args ...interface{}) error {
-	return New(Malformed, msg, args...)
-}
-
-func UnauthorizedError(msg string, args ...interface{}) error {
-	return New(Unauthorized, msg, args...)
-}
-
-func NotFoundError(msg string, args ...interface{}) error {
-	return New(NotFound, msg, args...)
-}
-
-func RateLimitError(retryAfter time.Duration, msg string, args ...interface{}) error {
-	return &BoulderError{
-		Type:       RateLimit,
-		Detail:     fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...),
-		RetryAfter: retryAfter,
-	}
-}
-
-func DuplicateCertificateError(retryAfter time.Duration, msg string, args ...interface{}) error {
-	return &BoulderError{
-		Type:       RateLimit,
-		Detail:     fmt.Sprintf(msg+": see https://letsencrypt.org/docs/duplicate-certificate-limit/", args...),
-		RetryAfter: retryAfter,
-	}
-}
-
-func FailedValidationError(retryAfter time.Duration, msg string, args ...interface{}) error {
-	return &BoulderError{
-		Type:       RateLimit,
-		Detail:     fmt.Sprintf(msg+": see https://letsencrypt.org/docs/failed-validation-limit/", args...),
-		RetryAfter: retryAfter,
-	}
-}
-
-func RegistrationsPerIPError(retryAfter time.Duration, msg string, args ...interface{}) error {
-	return &BoulderError{
-		Type:       RateLimit,
-		Detail:     fmt.Sprintf(msg+": see https://letsencrypt.org/docs/too-many-registrations-for-this-ip/", args...),
-		RetryAfter: retryAfter,
-	}
-}
-
-func RejectedIdentifierError(msg string, args ...interface{}) error {
-	return New(RejectedIdentifier, msg, args...)
-}
-
-func InvalidEmailError(msg string, args ...interface{}) error {
-	return New(InvalidEmail, msg, args...)
-}
-
-func ConnectionFailureError(msg string, args ...interface{}) error {
-	return New(ConnectionFailure, msg, args...)
-}
-
-func CAAError(msg string, args ...interface{}) error {
-	return New(CAA, msg, args...)
-}
-
-func MissingSCTsError(msg string, args ...interface{}) error {
-	return New(MissingSCTs, msg, args...)
-}
-
-func DuplicateError(msg string, args ...interface{}) error {
-	return New(Duplicate, msg, args...)
-}
-
-func OrderNotReadyError(msg string, args ...interface{}) error {
-	return New(OrderNotReady, msg, args...)
-}
-
-func DNSError(msg string, args ...interface{}) error {
-	return New(DNS, msg, args...)
-}
-
-func BadPublicKeyError(msg string, args ...interface{}) error {
-	return New(BadPublicKey, msg, args...)
-}
-
-func BadCSRError(msg string, args ...interface{}) error {
-	return New(BadCSR, msg, args...)
-}
-
-func AlreadyRevokedError(msg string, args ...interface{}) error {
-	return New(AlreadyRevoked, msg, args...)
-}
-
-func BadRevocationReasonError(reason int64) error {
-	return New(BadRevocationReason, "disallowed revocation reason: %d", reason)
-}
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
index fdcfe9a18..198c09db4 100644
--- a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go
@@ -9,8 +9,7 @@ import (
 	"os"
 
 	"github.com/letsencrypt/boulder/core"
-
-	yaml "gopkg.in/yaml.v3"
+	"github.com/letsencrypt/boulder/strictyaml"
 )
 
 // blockedKeys is a type for maintaining a map of SHA256 hashes
@@ -58,7 +57,7 @@ func loadBlockedKeysList(filename string) (*blockedKeys, error) {
 		BlockedHashes    []string `yaml:"blocked"`
 		BlockedHashesHex []string `yaml:"blockedHashesHex"`
 	}
-	err = yaml.Unmarshal(yamlBytes, &list)
+	err = strictyaml.Unmarshal(yamlBytes, &list)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
index 741bbf263..087a01812 100644
--- a/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
+++ b/vendor/github.com/letsencrypt/boulder/goodkey/good_key.go
@@ -12,7 +12,6 @@ import (
 	"sync"
 
 	"github.com/letsencrypt/boulder/core"
-	berrors "github.com/letsencrypt/boulder/errors"
 	"github.com/titanous/rocacheck"
 )
 
@@ -136,7 +135,7 @@ func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) erro
 	// that has been administratively blocked.
 	if policy.blockedList != nil {
 		if blocked, err := policy.blockedList.blocked(key); err != nil {
-			return berrors.InternalServerError("error checking blocklist for key: %v", key)
+			return fmt.Errorf("error checking blocklist for key: %v", key)
 		} else if blocked {
 			return badKey("public key is forbidden")
 		}
diff --git a/vendor/github.com/letsencrypt/boulder/probs/probs.go b/vendor/github.com/letsencrypt/boulder/probs/probs.go
index 3736e8d39..2cc766237 100644
--- a/vendor/github.com/letsencrypt/boulder/probs/probs.go
+++ b/vendor/github.com/letsencrypt/boulder/probs/probs.go
@@ -7,29 +7,33 @@ import (
 	"github.com/letsencrypt/boulder/identifier"
 )
 
-// Error types that can be used in ACME payloads
 const (
+	// Error types that can be used in ACME payloads. These are sorted in the
+	// same order as they are defined in RFC8555 Section 6.7. We do not implement
+	// the `compound`, `externalAccountRequired`, or `userActionRequired` errors,
+	// because we have no path that would return them.
+	AccountDoesNotExistProblem   = ProblemType("accountDoesNotExist")
+	AlreadyRevokedProblem        = ProblemType("alreadyRevoked")
+	BadCSRProblem                = ProblemType("badCSR")
+	BadNonceProblem              = ProblemType("badNonce")
+	BadPublicKeyProblem          = ProblemType("badPublicKey")
+	BadRevocationReasonProblem   = ProblemType("badRevocationReason")
+	BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm")
+	CAAProblem                   = ProblemType("caa")
 	ConnectionProblem            = ProblemType("connection")
+	DNSProblem                   = ProblemType("dns")
+	InvalidContactProblem        = ProblemType("invalidContact")
 	MalformedProblem             = ProblemType("malformed")
+	OrderNotReadyProblem         = ProblemType("orderNotReady")
+	RateLimitedProblem           = ProblemType("rateLimited")
+	RejectedIdentifierProblem    = ProblemType("rejectedIdentifier")
 	ServerInternalProblem        = ProblemType("serverInternal")
 	TLSProblem                   = ProblemType("tls")
 	UnauthorizedProblem          = ProblemType("unauthorized")
-	RateLimitedProblem           = ProblemType("rateLimited")
-	BadNonceProblem              = ProblemType("badNonce")
-	InvalidEmailProblem          = ProblemType("invalidEmail")
-	RejectedIdentifierProblem    = ProblemType("rejectedIdentifier")
-	AccountDoesNotExistProblem   = ProblemType("accountDoesNotExist")
-	CAAProblem                   = ProblemType("caa")
-	DNSProblem                   = ProblemType("dns")
-	AlreadyRevokedProblem        = ProblemType("alreadyRevoked")
-	OrderNotReadyProblem         = ProblemType("orderNotReady")
-	BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm")
-	BadPublicKeyProblem          = ProblemType("badPublicKey")
-	BadRevocationReasonProblem   = ProblemType("badRevocationReason")
-	BadCSRProblem                = ProblemType("badCSR")
+	UnsupportedContactProblem    = ProblemType("unsupportedContact")
+	UnsupportedIdentifierProblem = ProblemType("unsupportedIdentifier")
 
-	V1ErrorNS = "urn:acme:error:"
-	V2ErrorNS = "urn:ietf:params:acme:error:"
+	ErrorNS = "urn:ietf:params:acme:error:"
 )
 
 // ProblemType defines the error types in the ACME protocol
@@ -71,112 +75,71 @@ func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *Problem
 	}
 }
 
-// statusTooManyRequests is the HTTP status code meant for rate limiting
-// errors. It's not currently in the net/http library so we add it here.
-const statusTooManyRequests = 429
+// Helper functions which construct the basic RFC8555 Problem Documents, with
+// the Type already set and the Details supplied by the caller.
 
-// ProblemDetailsToStatusCode inspects the given ProblemDetails to figure out
-// what HTTP status code it should represent. It should only be used by the WFE
-// but is included in this package because of its reliance on ProblemTypes.
-func ProblemDetailsToStatusCode(prob *ProblemDetails) int {
-	if prob.HTTPStatus != 0 {
-		return prob.HTTPStatus
-	}
-	switch prob.Type {
-	case
-		ConnectionProblem,
-		MalformedProblem,
-		BadSignatureAlgorithmProblem,
-		BadPublicKeyProblem,
-		TLSProblem,
-		BadNonceProblem,
-		InvalidEmailProblem,
-		RejectedIdentifierProblem,
-		AccountDoesNotExistProblem,
-		BadRevocationReasonProblem:
-		return http.StatusBadRequest
-	case ServerInternalProblem:
-		return http.StatusInternalServerError
-	case
-		UnauthorizedProblem,
-		CAAProblem:
-		return http.StatusForbidden
-	case RateLimitedProblem:
-		return statusTooManyRequests
-	default:
-		return http.StatusInternalServerError
-	}
-}
-
-// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad
-// Request status code.
-func BadNonce(detail string) *ProblemDetails {
+// AccountDoesNotExist returns a ProblemDetails representing an
+// AccountDoesNotExistProblem error
+func AccountDoesNotExist(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       BadNonceProblem,
+		Type:       AccountDoesNotExistProblem,
 		Detail:     detail,
 		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad
+// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad
 // Request status code.
-func RejectedIdentifier(detail string) *ProblemDetails {
+func AlreadyRevoked(detail string, a ...any) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       RejectedIdentifierProblem,
-		Detail:     detail,
+		Type:       AlreadyRevokedProblem,
+		Detail:     fmt.Sprintf(detail, a...),
 		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict
-// status code.
-func Conflict(detail string) *ProblemDetails {
+// BadCSR returns a ProblemDetails representing a BadCSRProblem.
+func BadCSR(detail string, a ...any) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       MalformedProblem,
-		Detail:     detail,
-		HTTPStatus: http.StatusConflict,
+		Type:       BadCSRProblem,
+		Detail:     fmt.Sprintf(detail, a...),
+		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad
+// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad
 // Request status code.
-func AlreadyRevoked(detail string, a ...interface{}) *ProblemDetails {
+func BadNonce(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       AlreadyRevokedProblem,
-		Detail:     fmt.Sprintf(detail, a...),
+		Type:       BadNonceProblem,
+		Detail:     detail,
 		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad
+// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad
 // Request status code.
-func Malformed(detail string, args ...interface{}) *ProblemDetails {
-	if len(args) > 0 {
-		detail = fmt.Sprintf(detail, args...)
-	}
+func BadPublicKey(detail string, a ...any) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       MalformedProblem,
-		Detail:     detail,
+		Type:       BadPublicKeyProblem,
+		Detail:     fmt.Sprintf(detail, a...),
 		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request
-// Timeout status code.
-func Canceled(detail string, args ...interface{}) *ProblemDetails {
-	if len(args) > 0 {
-		detail = fmt.Sprintf(detail, args...)
-	}
+// BadRevocationReason returns a ProblemDetails representing
+// a BadRevocationReasonProblem
+func BadRevocationReason(detail string, a ...any) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       MalformedProblem,
-		Detail:     detail,
-		HTTPStatus: http.StatusRequestTimeout,
+		Type:       BadRevocationReasonProblem,
+		Detail:     fmt.Sprintf(detail, a...),
+		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
 // BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem
 // and a 400 Bad Request status code.
-func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails {
+func BadSignatureAlgorithm(detail string, a ...any) *ProblemDetails {
 	return &ProblemDetails{
 		Type:       BadSignatureAlgorithmProblem,
 		Detail:     fmt.Sprintf(detail, a...),
@@ -184,166 +147,195 @@ func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails {
 	}
 }
 
-// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad
-// Request status code.
-func BadPublicKey(detail string, a ...interface{}) *ProblemDetails {
+// CAA returns a ProblemDetails representing a CAAProblem
+func CAA(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       BadPublicKeyProblem,
-		Detail:     fmt.Sprintf(detail, a...),
-		HTTPStatus: http.StatusBadRequest,
+		Type:       CAAProblem,
+		Detail:     detail,
+		HTTPStatus: http.StatusForbidden,
 	}
 }
 
-// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found
-// status code.
-func NotFound(detail string) *ProblemDetails {
+// Connection returns a ProblemDetails representing a ConnectionProblem
+// error
+func Connection(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       MalformedProblem,
+		Type:       ConnectionProblem,
 		Detail:     detail,
-		HTTPStatus: http.StatusNotFound,
+		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a
-// 500 Internal Server Failure status code.
-func ServerInternal(detail string) *ProblemDetails {
+// DNS returns a ProblemDetails representing a DNSProblem
+func DNS(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       ServerInternalProblem,
+		Type:       DNSProblem,
 		Detail:     detail,
-		HTTPStatus: http.StatusInternalServerError,
+		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403
-// Forbidden status code.
-func Unauthorized(detail string) *ProblemDetails {
+// InvalidContact returns a ProblemDetails representing an InvalidContactProblem.
+func InvalidContact(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       UnauthorizedProblem,
+		Type:       InvalidContactProblem,
 		Detail:     detail,
-		HTTPStatus: http.StatusForbidden,
+		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP
-// method error.
-func MethodNotAllowed() *ProblemDetails {
+// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad
+// Request status code.
+func Malformed(detail string, a ...any) *ProblemDetails {
+	if len(a) > 0 {
+		detail = fmt.Sprintf(detail, a...)
+	}
 	return &ProblemDetails{
 		Type:       MalformedProblem,
-		Detail:     "Method not allowed",
-		HTTPStatus: http.StatusMethodNotAllowed,
+		Detail:     detail,
+		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// ContentLengthRequired returns a ProblemDetails representing a missing
-// Content-Length header error
-func ContentLengthRequired() *ProblemDetails {
+// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem
+func OrderNotReady(detail string, a ...any) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       MalformedProblem,
-		Detail:     "missing Content-Length header",
-		HTTPStatus: http.StatusLengthRequired,
+		Type:       OrderNotReadyProblem,
+		Detail:     fmt.Sprintf(detail, a...),
+		HTTPStatus: http.StatusForbidden,
 	}
 }
 
-// InvalidContentType returns a ProblemDetails suitable for a missing
-// ContentType header, or an incorrect ContentType header
-func InvalidContentType(detail string) *ProblemDetails {
+// RateLimited returns a ProblemDetails representing a RateLimitedProblem error
+func RateLimited(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       MalformedProblem,
+		Type:       RateLimitedProblem,
 		Detail:     detail,
-		HTTPStatus: http.StatusUnsupportedMediaType,
+		HTTPStatus: http.StatusTooManyRequests,
 	}
 }
 
-// InvalidEmail returns a ProblemDetails representing an invalid email address
-// error
-func InvalidEmail(detail string) *ProblemDetails {
+// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad
+// Request status code.
+func RejectedIdentifier(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       InvalidEmailProblem,
+		Type:       RejectedIdentifierProblem,
 		Detail:     detail,
 		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// ConnectionFailure returns a ProblemDetails representing a ConnectionProblem
-// error
-func ConnectionFailure(detail string) *ProblemDetails {
+// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a
+// 500 Internal Server Failure status code.
+func ServerInternal(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       ConnectionProblem,
+		Type:       ServerInternalProblem,
+		Detail:     detail,
+		HTTPStatus: http.StatusInternalServerError,
+	}
+}
+
+// TLS returns a ProblemDetails representing a TLSProblem error
+func TLS(detail string) *ProblemDetails {
+	return &ProblemDetails{
+		Type:       TLSProblem,
 		Detail:     detail,
 		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// RateLimited returns a ProblemDetails representing a RateLimitedProblem error
-func RateLimited(detail string) *ProblemDetails {
+// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403
+// Forbidden status code.
+func Unauthorized(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       RateLimitedProblem,
+		Type:       UnauthorizedProblem,
 		Detail:     detail,
-		HTTPStatus: statusTooManyRequests,
+		HTTPStatus: http.StatusForbidden,
 	}
 }
 
-// TLSError returns a ProblemDetails representing a TLSProblem error
-func TLSError(detail string) *ProblemDetails {
+// UnsupportedContact returns a ProblemDetails representing an
+// UnsupportedContactProblem
+func UnsupportedContact(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       TLSProblem,
+		Type:       UnsupportedContactProblem,
 		Detail:     detail,
 		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// AccountDoesNotExist returns a ProblemDetails representing an
-// AccountDoesNotExistProblem error
-func AccountDoesNotExist(detail string) *ProblemDetails {
+// UnsupportedIdentifier returns a ProblemDetails representing an
+// UnsupportedIdentifierProblem
+func UnsupportedIdentifier(detail string, a ...any) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       AccountDoesNotExistProblem,
-		Detail:     detail,
+		Type:       UnsupportedIdentifierProblem,
+		Detail:     fmt.Sprintf(detail, a...),
 		HTTPStatus: http.StatusBadRequest,
 	}
 }
 
-// CAA returns a ProblemDetails representing a CAAProblem
-func CAA(detail string) *ProblemDetails {
+// Additional helper functions that return variations on MalformedProblem with
+// different HTTP status codes set.
+
+// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request
+// Timeout status code.
+func Canceled(detail string, a ...any) *ProblemDetails {
+	if len(a) > 0 {
+		detail = fmt.Sprintf(detail, a...)
+	}
 	return &ProblemDetails{
-		Type:       CAAProblem,
+		Type:       MalformedProblem,
 		Detail:     detail,
-		HTTPStatus: http.StatusForbidden,
+		HTTPStatus: http.StatusRequestTimeout,
 	}
 }
 
-// DNS returns a ProblemDetails representing a DNSProblem
-func DNS(detail string) *ProblemDetails {
+// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict
+// status code.
+func Conflict(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       DNSProblem,
+		Type:       MalformedProblem,
 		Detail:     detail,
-		HTTPStatus: http.StatusBadRequest,
+		HTTPStatus: http.StatusConflict,
 	}
 }
 
-// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem
-func OrderNotReady(detail string, a ...interface{}) *ProblemDetails {
+// ContentLengthRequired returns a ProblemDetails representing a missing
+// Content-Length header error
+func ContentLengthRequired() *ProblemDetails {
 	return &ProblemDetails{
-		Type:       OrderNotReadyProblem,
-		Detail:     fmt.Sprintf(detail, a...),
-		HTTPStatus: http.StatusForbidden,
+		Type:       MalformedProblem,
+		Detail:     "missing Content-Length header",
+		HTTPStatus: http.StatusLengthRequired,
 	}
 }
 
-// BadRevocationReason returns a ProblemDetails representing
-// a BadRevocationReasonProblem
-func BadRevocationReason(detail string, a ...interface{}) *ProblemDetails {
+// InvalidContentType returns a ProblemDetails suitable for a missing
+// ContentType header, or an incorrect ContentType header
+func InvalidContentType(detail string) *ProblemDetails {
 	return &ProblemDetails{
-		Type:       BadRevocationReasonProblem,
-		Detail:     fmt.Sprintf(detail, a...),
-		HTTPStatus: http.StatusBadRequest,
+		Type:       MalformedProblem,
+		Detail:     detail,
+		HTTPStatus: http.StatusUnsupportedMediaType,
 	}
 }
 
-// BadCSR returns a ProblemDetails representing a BadCSRProblem.
-func BadCSR(detail string, a ...interface{}) *ProblemDetails {
+// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP
+// method error.
+func MethodNotAllowed() *ProblemDetails {
 	return &ProblemDetails{
-		Type:       BadCSRProblem,
-		Detail:     fmt.Sprintf(detail, a...),
-		HTTPStatus: http.StatusBadRequest,
+		Type:       MalformedProblem,
+		Detail:     "Method not allowed",
+		HTTPStatus: http.StatusMethodNotAllowed,
+	}
+}
+
+// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found
+// status code.
+func NotFound(detail string) *ProblemDetails {
+	return &ProblemDetails{
+		Type:       MalformedProblem,
+		Detail:     detail,
+		HTTPStatus: http.StatusNotFound,
 	}
 }
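Reviewer note: the probs.go reorganization above is behavior-preserving apart from renamed helpers (`ConnectionFailure` is now `Connection`, `TLSError` is now `TLS`) and the switch from `...interface{}` to `...any` variadics. A short sketch of how a caller builds one of these documents with the new signatures:

```go
package main

import (
	"fmt"

	"github.com/letsencrypt/boulder/probs"
)

func main() {
	// The format-string helpers now take `any` variadics.
	p := probs.OrderNotReady("order %d is not in status ready", 1234)
	fmt.Println(p.Type, p.HTTPStatus, p.Detail)
}
```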
+func MethodNotAllowed() *ProblemDetails { return &ProblemDetails{ - Type: BadCSRProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, + Type: MalformedProblem, + Detail: "Method not allowed", + HTTPStatus: http.StatusMethodNotAllowed, + } +} + +// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found +// status code. +func NotFound(detail string) *ProblemDetails { + return &ProblemDetails{ + Type: MalformedProblem, + Detail: detail, + HTTPStatus: http.StatusNotFound, } } diff --git a/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go b/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go new file mode 100644 index 000000000..8e3bae996 --- /dev/null +++ b/vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go @@ -0,0 +1,46 @@ +// Package strictyaml provides a strict YAML unmarshaller based on `go-yaml/yaml` +package strictyaml + +import ( + "bytes" + "errors" + "fmt" + "io" + + "gopkg.in/yaml.v3" +) + +// Unmarshal takes a byte array and an interface passed by reference. The +// d.Decode will read the next YAML-encoded value from its input and store it in +// the value pointed to by yamlObj. Any config keys from the incoming YAML +// document which do not correspond to expected keys in the config struct will +// result in errors. +// +// TODO(https://github.com/go-yaml/yaml/issues/639): Replace this function with +// yaml.Unmarshal once a more ergonomic way to set unmarshal options is added +// upstream. +func Unmarshal(b []byte, yamlObj interface{}) error { + r := bytes.NewReader(b) + + d := yaml.NewDecoder(r) + d.KnownFields(true) + + // d.Decode will mutate yamlObj + err := d.Decode(yamlObj) + + if err != nil { + // io.EOF is returned when the YAML document is empty. + if errors.Is(err, io.EOF) { + return fmt.Errorf("unmarshalling YAML, bytes cannot be nil: %w", err) + } + return fmt.Errorf("unmarshalling YAML: %w", err) + } + + // As bytes are read by the decoder, the length of the byte buffer should + // decrease. If it doesn't, there's a problem. + if r.Len() != 0 { + return fmt.Errorf("yaml object of size %d bytes had %d bytes of unexpected unconsumed trailers", r.Size(), r.Len()) + } + + return nil +} diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go index d30569100..b794bcd83 100644 --- a/vendor/github.com/mattn/go-sqlite3/callback.go +++ b/vendor/github.com/mattn/go-sqlite3/callback.go @@ -100,13 +100,13 @@ func preUpdateHookTrampoline(handle unsafe.Pointer, dbHandle uintptr, op int, db // Use handles to avoid passing Go pointers to C. 
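For orientation before the go-sqlite3 hunks: the new strictyaml package vendored above is small enough that a usage sketch shows its whole contract — unknown keys fail instead of being silently dropped, and empty input is an error. The exporterConfig type and its fields are invented for this illustration; strictyaml.Unmarshal is the function added in the new file.

	package main

	import (
		"fmt"

		"github.com/letsencrypt/boulder/strictyaml"
	)

	// exporterConfig is a hypothetical config struct; only the yaml tags matter.
	type exporterConfig struct {
		ListenAddr string `yaml:"listenAddr"`
		TimeoutSec int    `yaml:"timeoutSec"`
	}

	func main() {
		var cfg exporterConfig

		// A well-formed document decodes as with plain yaml.Unmarshal.
		ok := []byte("listenAddr: \":9882\"\ntimeoutSec: 5\n")
		if err := strictyaml.Unmarshal(ok, &cfg); err != nil {
			panic(err)
		}
		fmt.Println(cfg.ListenAddr)

		// A misspelled key is rejected, because the decoder has
		// KnownFields(true) set, rather than being ignored.
		bad := []byte("listenAddr: \":9882\"\ntimeotSec: 5\n")
		if err := strictyaml.Unmarshal(bad, &cfg); err != nil {
			fmt.Println("rejected:", err)
		}

		// An empty document is also an error ("bytes cannot be nil").
		if err := strictyaml.Unmarshal(nil, &cfg); err != nil {
			fmt.Println("rejected:", err)
		}
	}
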
type handleVal struct { db *SQLiteConn - val interface{} + val any } var handleLock sync.Mutex var handleVals = make(map[unsafe.Pointer]handleVal) -func newHandle(db *SQLiteConn, v interface{}) unsafe.Pointer { +func newHandle(db *SQLiteConn, v any) unsafe.Pointer { handleLock.Lock() defer handleLock.Unlock() val := handleVal{db: db, val: v} @@ -124,7 +124,7 @@ func lookupHandleVal(handle unsafe.Pointer) handleVal { return handleVals[handle] } -func lookupHandle(handle unsafe.Pointer) interface{} { +func lookupHandle(handle unsafe.Pointer) any { return lookupHandleVal(handle).val } @@ -238,7 +238,7 @@ func callbackArg(typ reflect.Type) (callbackArgConverter, error) { switch typ.Kind() { case reflect.Interface: if typ.NumMethod() != 0 { - return nil, errors.New("the only supported interface type is interface{}") + return nil, errors.New("the only supported interface type is any") } return callbackArgGeneric, nil case reflect.Slice: @@ -360,11 +360,11 @@ func callbackRetGeneric(ctx *C.sqlite3_context, v reflect.Value) error { } cb, err := callbackRet(v.Elem().Type()) - if err != nil { - return err - } + if err != nil { + return err + } - return cb(ctx, v.Elem()) + return cb(ctx, v.Elem()) } func callbackRet(typ reflect.Type) (callbackRetConverter, error) { diff --git a/vendor/github.com/mattn/go-sqlite3/convert.go b/vendor/github.com/mattn/go-sqlite3/convert.go index 03850737f..f7a9dcd72 100644 --- a/vendor/github.com/mattn/go-sqlite3/convert.go +++ b/vendor/github.com/mattn/go-sqlite3/convert.go @@ -23,7 +23,7 @@ var errNilPtr = errors.New("destination pointer is nil") // embedded in descript // convertAssign copies to dest the value in src, converting it if possible. // An error is returned if the copy would result in loss of information. // dest should be a pointer type. -func convertAssign(dest, src interface{}) error { +func convertAssign(dest, src any) error { // Common cases, without reflect. switch s := src.(type) { case string: @@ -55,7 +55,7 @@ func convertAssign(dest, src interface{}) error { } *d = string(s) return nil - case *interface{}: + case *any: if d == nil { return errNilPtr } @@ -97,7 +97,7 @@ func convertAssign(dest, src interface{}) error { } case nil: switch d := dest.(type) { - case *interface{}: + case *any: if d == nil { return errNilPtr } @@ -149,7 +149,7 @@ func convertAssign(dest, src interface{}) error { *d = bv.(bool) } return err - case *interface{}: + case *any: *d = src return nil } @@ -256,7 +256,7 @@ func cloneBytes(b []byte) []byte { return c } -func asString(src interface{}) string { +func asString(src any) string { switch v := src.(type) { case string: return v diff --git a/vendor/github.com/mattn/go-sqlite3/doc.go b/vendor/github.com/mattn/go-sqlite3/doc.go index ac27633b3..a3bcebbcb 100644 --- a/vendor/github.com/mattn/go-sqlite3/doc.go +++ b/vendor/github.com/mattn/go-sqlite3/doc.go @@ -5,63 +5,63 @@ This works as a driver for database/sql. Installation - go get github.com/mattn/go-sqlite3 + go get github.com/mattn/go-sqlite3 -Supported Types +# Supported Types Currently, go-sqlite3 supports the following data types. 
-	+------------------------------+
-	|go        | sqlite3           |
-	|----------|-------------------|
-	|nil       | null              |
-	|int       | integer           |
-	|int64     | integer           |
-	|float64   | float             |
-	|bool      | integer           |
-	|[]byte    | blob              |
-	|string    | text              |
-	|time.Time | timestamp/datetime|
-	+------------------------------+
-
-SQLite3 Extension
+	+------------------------------+
+	|go        | sqlite3           |
+	|----------|-------------------|
+	|nil       | null              |
+	|int       | integer           |
+	|int64     | integer           |
+	|float64   | float             |
+	|bool      | integer           |
+	|[]byte    | blob              |
+	|string    | text              |
+	|time.Time | timestamp/datetime|
+	+------------------------------+
+
+# SQLite3 Extension

 You can write your own extension module for sqlite3. For example, below
 is an extension for a Regexp matcher operation.

-	#include <pcre.h>
-	#include <string.h>
-	#include <stdio.h>
-	#include <sqlite3ext.h>
-
-	SQLITE_EXTENSION_INIT1
-	static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
-	  if (argc >= 2) {
-	    const char *target = (const char *)sqlite3_value_text(argv[1]);
-	    const char *pattern = (const char *)sqlite3_value_text(argv[0]);
-	    const char* errstr = NULL;
-	    int erroff = 0;
-	    int vec[500];
-	    int n, rc;
-	    pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
-	    rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
-	    if (rc <= 0) {
-	      sqlite3_result_error(context, errstr, 0);
-	      return;
-	    }
-	    sqlite3_result_int(context, 1);
-	  }
-	}
-
-	#ifdef _WIN32
-	__declspec(dllexport)
-	#endif
-	int sqlite3_extension_init(sqlite3 *db, char **errmsg,
-	      const sqlite3_api_routines *api) {
-	  SQLITE_EXTENSION_INIT2(api);
-	  return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
-	    (void*)db, regexp_func, NULL, NULL);
-	}
+	#include <pcre.h>
+	#include <string.h>
+	#include <stdio.h>
+	#include <sqlite3ext.h>
+
+	SQLITE_EXTENSION_INIT1
+	static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
+	  if (argc >= 2) {
+	    const char *target = (const char *)sqlite3_value_text(argv[1]);
+	    const char *pattern = (const char *)sqlite3_value_text(argv[0]);
+	    const char* errstr = NULL;
+	    int erroff = 0;
+	    int vec[500];
+	    int n, rc;
+	    pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
+	    rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
+	    if (rc <= 0) {
+	      sqlite3_result_error(context, errstr, 0);
+	      return;
+	    }
+	    sqlite3_result_int(context, 1);
+	  }
+	}
+
+	#ifdef _WIN32
+	__declspec(dllexport)
+	#endif
+	int sqlite3_extension_init(sqlite3 *db, char **errmsg,
+	      const sqlite3_api_routines *api) {
+	  SQLITE_EXTENSION_INIT2(api);
+	  return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
+	    (void*)db, regexp_func, NULL, NULL);
+	}

 It needs to be built as a so/dll shared library. And you need to
 register the extension module like below.
@@ -77,7 +77,7 @@
 Then, you can use this extension.

 	rows, err := db.Query("select text from mytable where name regexp '^golang'")

-Connection Hook
+# Connection Hook

 You can hook and inject your code when the connection is established by
 setting ConnectHook to get the SQLiteConn.
@@ -95,13 +95,13 @@
 You can also use database/sql.Conn.Raw (Go >= 1.13):

 	conn, err := db.Conn(context.Background())
 	// if err != nil { ... }
 	defer conn.Close()
-	err = conn.Raw(func (driverConn interface{}) error {
+	err = conn.Raw(func (driverConn any) error {
 		sqliteConn := driverConn.(*sqlite3.SQLiteConn)
 		// ... use sqliteConn
 	})
 	// if err != nil { ...
} -Go SQlite3 Extensions +# Go SQlite3 Extensions If you want to register Go functions as SQLite extension functions you can make a custom driver by calling RegisterFunction from @@ -130,6 +130,5 @@ You can then use the custom driver by passing its name to sql.Open. } See the documentation of RegisterFunc for more details. - */ package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index 454512ad2..53d7560ec 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,7 +1,7 @@ #ifndef USE_LIBSQLITE3 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.44.0. By combining all the individual C code files into this +** version 3.45.1. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -19,7 +19,7 @@ ** separate file. This file contains only code for the core SQLite library. ** ** The content in this amalgamation comes from Fossil check-in -** 17129ba1ff7f0daf37100ee82d507aef7827. +** e876e51a0ed5c5b3126f52e532044363a014. */ #define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1 @@ -460,9 +460,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.44.0" -#define SQLITE_VERSION_NUMBER 3044000 -#define SQLITE_SOURCE_ID "2023-11-01 11:23:50 17129ba1ff7f0daf37100ee82d507aef7827cf38de1866e2633096ae6ad81301" +#define SQLITE_VERSION "3.45.1" +#define SQLITE_VERSION_NUMBER 3045001 +#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -4268,15 +4268,17 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename); ** ** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language -** text that describes the error, as either UTF-8 or UTF-16 respectively. +** text that describes the error, as either UTF-8 or UTF-16 respectively, +** or NULL if no error message is available. ** (See how SQLite handles [invalid UTF] for exceptions to this rule.) ** ^(Memory to hold the error message string is managed internally. ** The application does not need to worry about freeing the result. ** However, the error string might be overwritten or deallocated by ** subsequent calls to other SQLite interface functions.)^ ** -** ^The sqlite3_errstr() interface returns the English-language text -** that describes the [result code], as UTF-8. +** ^The sqlite3_errstr(E) interface returns the English-language text +** that describes the [result code] E, as UTF-8, or NULL if E is not an +** result code for which a text error message is available. ** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. ** @@ -5887,13 +5889,27 @@ SQLITE_API int sqlite3_create_window_function( ** ** ** [[SQLITE_SUBTYPE]]
<dt>SQLITE_SUBTYPE</dt><dd>
-** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call +** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call ** [sqlite3_value_subtype()] to inspect the sub-types of its arguments. -** Specifying this flag makes no difference for scalar or aggregate user -** functions. However, if it is not specified for a user-defined window -** function, then any sub-types belonging to arguments passed to the window -** function may be discarded before the window function is called (i.e. -** sqlite3_value_subtype() will always return 0). +** This flag instructs SQLite to omit some corner-case optimizations that +** might disrupt the operation of the [sqlite3_value_subtype()] function, +** causing it to return zero rather than the correct subtype(). +** SQL functions that invokes [sqlite3_value_subtype()] should have this +** property. If the SQLITE_SUBTYPE property is omitted, then the return +** value from [sqlite3_value_subtype()] might sometimes be zero even though +** a non-zero subtype was specified by the function argument expression. +** +** [[SQLITE_RESULT_SUBTYPE]]
<dt>SQLITE_RESULT_SUBTYPE</dt><dd>
+** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call
+** [sqlite3_result_subtype()] to cause a sub-type to be associated with its
+** result.
+** Every function that invokes [sqlite3_result_subtype()] should have this
+** property. If it does not, then the call to [sqlite3_result_subtype()]
+** might become a no-op if the function is used as a term in an
+** [expression index]. On the other hand, SQL functions that never invoke
+** [sqlite3_result_subtype()] should avoid setting this property, as the
+** purpose of this property is to disable certain optimizations that are
+** incompatible with subtypes.
 ** </dd>
** */ @@ -5901,6 +5917,7 @@ SQLITE_API int sqlite3_create_window_function( #define SQLITE_DIRECTONLY 0x000080000 #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 +#define SQLITE_RESULT_SUBTYPE 0x001000000 /* ** CAPI3REF: Deprecated Functions @@ -6097,6 +6114,12 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); ** information can be used to pass a limited amount of context from ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. +** +** Every [application-defined SQL function] that invoke this interface +** should include the [SQLITE_SUBTYPE] property in the text +** encoding argument when the function is [sqlite3_create_function|registered]. +** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() +** might return zero instead of the upstream subtype in some corner cases. */ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); @@ -6227,14 +6250,22 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); **
  • ^(when sqlite3_set_auxdata() is invoked again on the same ** parameter)^, or **
  • ^(during the original sqlite3_set_auxdata() call when a memory -** allocation error occurs.)^ +** allocation error occurs.)^ +**
  • ^(during the original sqlite3_set_auxdata() call if the function +** is evaluated during query planning instead of during query execution, +** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ ** -** Note the last bullet in particular. The destructor X in +** Note the last two bullets in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the ** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() ** should be called near the end of the function implementation and the ** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. +** sqlite3_set_auxdata() has been called. Furthermore, a call to +** sqlite3_get_auxdata() that occurs immediately after a corresponding call +** to sqlite3_set_auxdata() might still return NULL if an out-of-memory +** condition occurred during the sqlite3_set_auxdata() call or if the +** function is being evaluated during query planning rather than during +** query execution. ** ** ^(In practice, auxiliary data is preserved between function calls for ** function parameters that are compile-time constants, including literal @@ -6508,6 +6539,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n); ** higher order bits are discarded. ** The number of subtype bytes preserved by SQLite might increase ** in future releases of SQLite. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_RESULT_SUBTYPE] property in its +** text encoding argument when the SQL function is +** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE] +** property is omitted from the function that invokes sqlite3_result_subtype(), +** then in some cases the sqlite3_result_subtype() might fail to set +** the result subtype. +** +** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any +** SQL function that invokes the sqlite3_result_subtype() interface +** and that does not have the SQLITE_RESULT_SUBTYPE property will raise +** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1 +** by default. */ SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int); @@ -8308,9 +8353,11 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** ** ^(Some systems (for example, Windows 95) do not support the operation ** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() -** will always return SQLITE_BUSY. The SQLite core only ever uses -** sqlite3_mutex_try() as an optimization so this is acceptable -** behavior.)^ +** will always return SQLITE_BUSY. In most cases the SQLite core only uses +** sqlite3_mutex_try() as an optimization, so this is acceptable +** behavior. The exceptions are unix builds that set the +** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working +** sqlite3_mutex_try() is required.)^ ** ** ^The sqlite3_mutex_leave() routine exits a mutex that was ** previously entered by the same thread. The behavior @@ -8569,6 +8616,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_ASSERT 12 #define SQLITE_TESTCTRL_ALWAYS 13 #define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */ +#define SQLITE_TESTCTRL_JSON_SELFCHECK 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ @@ -13082,8 +13130,11 @@ struct Fts5PhraseIter { ** created with the "columnsize=0" option. 
** ** xColumnText: -** This function attempts to retrieve the text of column iCol of the -** current document. If successful, (*pz) is set to point to a buffer +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer ** containing the text in utf-8 encoding, (*pn) is set to the size in bytes ** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, ** if an error occurs, an SQLite error code is returned and the final values @@ -13093,8 +13144,10 @@ struct Fts5PhraseIter { ** Returns the number of phrases in the current query expression. ** ** xPhraseSize: -** Returns the number of tokens in phrase iPhrase of the query. Phrases -** are numbered starting from zero. +** If parameter iCol is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. ** ** xInstCount: ** Set *pnInst to the total number of occurrences of all phrases within @@ -13110,12 +13163,13 @@ struct Fts5PhraseIter { ** Query for the details of phrase match iIdx within the current row. ** Phrase matches are numbered starting from zero, so the iIdx argument ** should be greater than or equal to zero and smaller than the value -** output by xInstCount(). +** output by xInstCount(). If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. ** -** Usually, output parameter *piPhrase is set to the phrase number, *piCol +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol ** to the column in which it occurs and *piOff the token offset of the -** first token of the phrase. Returns SQLITE_OK if successful, or an error -** code (i.e. SQLITE_NOMEM) if an error occurs. +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. ** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. @@ -13141,6 +13195,10 @@ struct Fts5PhraseIter { ** Invoking Api.xUserData() returns a copy of the pointer passed as ** the third argument to pUserData. ** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. ** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK. @@ -13255,9 +13313,42 @@ struct Fts5PhraseIter { ** ** xPhraseNextColumn() ** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. 
+** +** If iPhrase or iToken are less than zero, or if iPhrase is greater than +** or equal to the number of phrases in the query as reported by +** xPhraseCount(), or if iToken is equal to or greater than the number of +** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken + are both zeroed. +** +** The output text is not a copy of the query text that specified the +** token. It is the output of the tokenizer module. For tokendata=1 +** tables, this includes any embedded 0x00 and trailing data. +** +** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase hit iIdx within the +** current row. If iIdx is less than zero or greater than or equal to the +** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, +** output variable (*ppToken) is set to point to a buffer containing the +** matching document token, and (*pnToken) to the size of that buffer in +** bytes. This API is not available if the specified token matches a +** prefix query term. In that case both output variables are always set +** to 0. +** +** The output text is not a copy of the document text that was tokenized. +** It is the output of the tokenizer module. For tokendata=1 tables, this +** includes any embedded 0x00 and trailing data. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ void *(*xUserData)(Fts5Context*); @@ -13292,6 +13383,13 @@ struct Fts5ExtensionApi { int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); + + /* Below this point are iVersion>=3 only */ + int (*xQueryToken)(Fts5Context*, + int iPhrase, int iToken, + const char **ppToken, int *pnToken + ); + int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); }; /* @@ -13778,7 +13876,7 @@ struct fts5_api { ** max_page_count macro. */ #ifndef SQLITE_MAX_PAGE_COUNT -# define SQLITE_MAX_PAGE_COUNT 1073741823 +# define SQLITE_MAX_PAGE_COUNT 0xfffffffe /* 4294967294 */ #endif /* @@ -13917,6 +14015,19 @@ struct fts5_api { # undef SQLITE_USE_SEH #endif +/* +** Enable SQLITE_DIRECT_OVERFLOW_READ, unless the build explicitly +** disables it using -DSQLITE_DIRECT_OVERFLOW_READ=0 +*/ +#if defined(SQLITE_DIRECT_OVERFLOW_READ) && SQLITE_DIRECT_OVERFLOW_READ+1==1 + /* Disable if -DSQLITE_DIRECT_OVERFLOW_READ=0 */ +# undef SQLITE_DIRECT_OVERFLOW_READ +#else + /* In all other cases, enable */ +# define SQLITE_DIRECT_OVERFLOW_READ 1 +#endif + + /* ** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2. 
** 0 means mutexes are permanently disable and the library is never @@ -15799,7 +15910,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager*); SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*); SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*); SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*); -SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *); +SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, u64*); SQLITE_PRIVATE void sqlite3PagerClearCache(Pager*); SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *); @@ -16386,6 +16497,7 @@ typedef struct VdbeOpList VdbeOpList; #define P4_INT64 (-13) /* P4 is a 64-bit signed integer */ #define P4_INTARRAY (-14) /* P4 is a vector of 32-bit integers */ #define P4_FUNCCTX (-15) /* P4 is a pointer to an sqlite3_context object */ +#define P4_TABLEREF (-16) /* Like P4_TABLE, but reference counted */ /* Error message codes for OP_Halt */ #define P5_ConstraintNotNull 1 @@ -16608,13 +16720,15 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Pagecount 178 #define OP_MaxPgcnt 179 #define OP_ClrSubtype 180 /* synopsis: r[P1].subtype = 0 */ -#define OP_FilterAdd 181 /* synopsis: filter(P1) += key(P3@P4) */ -#define OP_Trace 182 -#define OP_CursorHint 183 -#define OP_ReleaseReg 184 /* synopsis: release r[P1@P2] mask P3 */ -#define OP_Noop 185 -#define OP_Explain 186 -#define OP_Abortable 187 +#define OP_GetSubtype 181 /* synopsis: r[P2] = r[P1].subtype */ +#define OP_SetSubtype 182 /* synopsis: r[P2].subtype = r[P1] */ +#define OP_FilterAdd 183 /* synopsis: filter(P1) += key(P3@P4) */ +#define OP_Trace 184 +#define OP_CursorHint 185 +#define OP_ReleaseReg 186 /* synopsis: release r[P1@P2] mask P3 */ +#define OP_Noop 187 +#define OP_Explain 188 +#define OP_Abortable 189 /* Properties such as "out2" or "jump" that are specified in ** comments following the "case" for each opcode in the vdbe.c @@ -16650,8 +16764,8 @@ typedef struct VdbeOpList VdbeOpList; /* 152 */ 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00, 0x04,\ /* 160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ /* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x50,\ -/* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00,\ -/* 184 */ 0x00, 0x00, 0x00, 0x00,} +/* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x12, 0x12, 0x00,\ +/* 184 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,} /* The resolve3P2Values() routine is able to run faster if it knows ** the value of the largest JUMP opcode. The smaller the maximum @@ -17812,14 +17926,15 @@ struct FuncDestructor { #define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". 
Value constant during a ** single query - might change over time */ #define SQLITE_FUNC_TEST 0x4000 /* Built-in testing functions */ -/* 0x8000 -- available for reuse */ +#define SQLITE_FUNC_RUNONLY 0x8000 /* Cannot be used by valueFromFunction */ #define SQLITE_FUNC_WINDOW 0x00010000 /* Built-in window-only function */ #define SQLITE_FUNC_INTERNAL 0x00040000 /* For use by NestedParse() only */ #define SQLITE_FUNC_DIRECT 0x00080000 /* Not for use in TRIGGERs or VIEWs */ -#define SQLITE_FUNC_SUBTYPE 0x00100000 /* Result likely to have sub-type */ +/* SQLITE_SUBTYPE 0x00100000 // Consumer of subtypes */ #define SQLITE_FUNC_UNSAFE 0x00200000 /* Function has side effects */ #define SQLITE_FUNC_INLINE 0x00400000 /* Functions implemented in-line */ #define SQLITE_FUNC_BUILTIN 0x00800000 /* This is a built-in function */ +/* SQLITE_RESULT_SUBTYPE 0x01000000 // Generator of subtypes */ #define SQLITE_FUNC_ANYORDER 0x08000000 /* count/min/max aggregate */ /* Identifier numbers for each in-line function */ @@ -17911,10 +18026,11 @@ struct FuncDestructor { #define MFUNCTION(zName, nArg, xPtr, xFunc) \ {nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ xPtr, 0, xFunc, 0, 0, 0, #zName, {0} } -#define JFUNCTION(zName, nArg, iArg, xFunc) \ - {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|\ - SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ - SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } +#define JFUNCTION(zName, nArg, bUseCache, bWS, bRS, bJsonB, iArg, xFunc) \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_FUNC_CONSTANT|\ + SQLITE_UTF8|((bUseCache)*SQLITE_FUNC_RUNONLY)|\ + ((bRS)*SQLITE_SUBTYPE)|((bWS)*SQLITE_RESULT_SUBTYPE), \ + SQLITE_INT_TO_PTR(iArg|((bJsonB)*JSON_BLOB)),0,xFunc,0, 0, 0, #zName, {0} } #define INLINE_FUNC(zName, nArg, iArg, mFlags) \ {nArg, SQLITE_FUNC_BUILTIN|\ SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \ @@ -18549,6 +18665,7 @@ struct Index { unsigned isCovering:1; /* True if this is a covering index */ unsigned noSkipScan:1; /* Do not try to use skip-scan if true */ unsigned hasStat1:1; /* aiRowLogEst values come from sqlite_stat1 */ + unsigned bLowQual:1; /* sqlite_stat1 says this is a low-quality index */ unsigned bNoQuery:1; /* Do not use this index to optimize queries */ unsigned bAscKeyBug:1; /* True if the bba7b69f9849b5bf bug applies */ unsigned bHasVCol:1; /* Index references one or more VIRTUAL columns */ @@ -18662,6 +18779,7 @@ struct AggInfo { int iOBTab; /* Ephemeral table to implement ORDER BY */ u8 bOBPayload; /* iOBTab has payload columns separate from key */ u8 bOBUnique; /* Enforce uniqueness on iOBTab keys */ + u8 bUseSubtype; /* Transfer subtype info through sorter */ } *aFunc; int nFunc; /* Number of entries in aFunc[] */ u32 selId; /* Select to which this AggInfo belongs */ @@ -19195,6 +19313,7 @@ struct NameContext { int nRef; /* Number of names resolved by this context */ int nNcErr; /* Number of errors encountered while resolving names */ int ncFlags; /* Zero or more NC_* flags defined below */ + u32 nNestedSelect; /* Number of nested selects using this NC */ Select *pWinSelect; /* SELECT statement for any window functions */ }; @@ -19911,6 +20030,9 @@ struct sqlite3_str { ** ** 3. Make a (read-only) copy of a read-only RCStr string using ** sqlite3RCStrRef(). +** +** "String" is in the name, but an RCStr object can also be used to hold +** binary data. 
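The reference-counted lifetime described in the RCStr comment above is easy to state in code. A loose Go analogy, not SQLite API (rcBuf and every name below are invented; where the C original frees the buffer, this sketch just drops it for the garbage collector):

	package rcbuf

	import "fmt"

	// rcBuf loosely mirrors the RCStr idea: a byte buffer whose lifetime is
	// governed by an explicit reference count instead of single ownership.
	type rcBuf struct {
		refs int
		data []byte
	}

	// newRCBuf creates the buffer; the creator holds the first reference.
	func newRCBuf(n int) *rcBuf {
		return &rcBuf{refs: 1, data: make([]byte, n)}
	}

	// ref makes a (read-only) copy: no bytes move, only the count grows.
	func (b *rcBuf) ref() *rcBuf {
		b.refs++
		return b
	}

	// unref releases one reference; storage goes away with the last holder.
	func (b *rcBuf) unref() {
		b.refs--
		if b.refs == 0 {
			b.data = nil
		}
	}

	func Example() {
		a := newRCBuf(8)
		c := a.ref() // shares storage with a
		a.unref()
		fmt.Println(len(c.data)) // 8: still alive through c
		c.unref()                // last reference: storage released
	}
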
*/ struct RCStr { u64 nRCRef; /* Number of references */ @@ -19969,6 +20091,9 @@ struct Sqlite3Config { u8 bSmallMalloc; /* Avoid large memory allocations if true */ u8 bExtraSchemaChecks; /* Verify type,name,tbl_name in schema */ u8 bUseLongDouble; /* Make use of long double */ +#ifdef SQLITE_DEBUG + u8 bJsonSelfcheck; /* Double-check JSON parsing */ +#endif int mxStrlen; /* Maximum string length */ int neverCorrupt; /* Database is always well-formed */ int szLookaside; /* Default lookaside buffer size */ @@ -20595,6 +20720,7 @@ SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse*,Expr*); SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*); SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); +SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*); SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); @@ -20604,6 +20730,7 @@ SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int,int); SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,const Token*,int); SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,const char*,const char*); SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*); +SQLITE_PRIVATE void sqlite3ExprListDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*); SQLITE_PRIVATE int sqlite3IndexHasDuplicateRootPage(Index*); SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**); @@ -20694,6 +20821,7 @@ SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask); SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int); SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int); SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*); +SQLITE_PRIVATE void sqlite3DeleteTableGeneric(sqlite3*, void*); SQLITE_PRIVATE void sqlite3FreeIndex(sqlite3*, Index*); #ifndef SQLITE_OMIT_AUTOINCREMENT SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse); @@ -20730,6 +20858,7 @@ SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*); SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*, Expr*,ExprList*,u32,Expr*); SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*); +SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*); SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, Trigger*); SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); @@ -20956,6 +21085,7 @@ SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar); #endif SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte); SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**); +SQLITE_PRIVATE int sqlite3Utf8ReadLimited(const u8*, int, u32*); SQLITE_PRIVATE LogEst sqlite3LogEst(u64); SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst); SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double); @@ -21302,6 +21432,7 @@ SQLITE_PRIVATE Cte *sqlite3CteNew(Parse*,Token*,ExprList*,Select*,u8); SQLITE_PRIVATE void sqlite3CteDelete(sqlite3*,Cte*); SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Cte*); SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*); +SQLITE_PRIVATE void sqlite3WithDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE With *sqlite3WithPush(Parse*, With*, u8); #else # define sqlite3CteNew(P,T,E,S) ((void*)0) @@ -22679,6 +22810,9 @@ SQLITE_PRIVATE SQLITE_WSD 
struct Sqlite3Config sqlite3Config = { 0, /* bSmallMalloc */ 1, /* bExtraSchemaChecks */ sizeof(LONGDOUBLE_TYPE)>8, /* bUseLongDouble */ +#ifdef SQLITE_DEBUG + 0, /* bJsonSelfcheck */ +#endif 0x7ffffffe, /* mxStrlen */ 0, /* neverCorrupt */ SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */ @@ -23931,7 +24065,7 @@ SQLITE_API int sqlite3_db_status( case SQLITE_DBSTATUS_CACHE_MISS: case SQLITE_DBSTATUS_CACHE_WRITE:{ int i; - int nRet = 0; + u64 nRet = 0; assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 ); assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 ); @@ -23944,7 +24078,7 @@ SQLITE_API int sqlite3_db_status( *pHighwater = 0; /* IMP: R-42420-56072 */ /* IMP: R-54100-20147 */ /* IMP: R-29431-39229 */ - *pCurrent = nRet; + *pCurrent = (int)nRet & 0x7fffffff; break; } @@ -25013,6 +25147,12 @@ static int isDate( } computeJD(p); if( p->isError || !validJulianDay(p->iJD) ) return 1; + if( argc==1 && p->validYMD && p->D>28 ){ + /* Make sure a YYYY-MM-DD is normalized. + ** Example: 2023-02-31 -> 2023-03-03 */ + assert( p->validJD ); + p->validYMD = 0; + } return 0; } @@ -29454,7 +29594,7 @@ SQLITE_PRIVATE void sqlite3MemoryBarrier(void){ SQLITE_MEMORY_BARRIER; #elif defined(__GNUC__) __sync_synchronize(); -#elif MSVC_VERSION>=1300 +#elif MSVC_VERSION>=1400 _ReadWriteBarrier(); #elif defined(MemoryBarrier) MemoryBarrier(); @@ -32041,7 +32181,7 @@ SQLITE_API void sqlite3_str_appendf(StrAccum *p, const char *zFormat, ...){ /***************************************************************************** -** Reference counted string storage +** Reference counted string/blob storage *****************************************************************************/ /* @@ -32893,7 +33033,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m assert( pExpr->x.pList->nExpr==2 ); pY = pExpr->x.pList->a[0].pExpr; pZ = pExpr->x.pList->a[1].pExpr; - sqlite3TreeViewLine(pView, "BETWEEN"); + sqlite3TreeViewLine(pView, "BETWEEN%s", zFlgs); sqlite3TreeViewExpr(pView, pX, 1); sqlite3TreeViewExpr(pView, pY, 1); sqlite3TreeViewExpr(pView, pZ, 0); @@ -34028,7 +34168,38 @@ SQLITE_PRIVATE u32 sqlite3Utf8Read( return c; } - +/* +** Read a single UTF8 character out of buffer z[], but reading no +** more than n characters from the buffer. z[] is not zero-terminated. +** +** Return the number of bytes used to construct the character. +** +** Invalid UTF8 might generate a strange result. No effort is made +** to detect invalid UTF8. +** +** At most 4 bytes will be read out of z[]. The return value will always +** be between 1 and 4. +*/ +SQLITE_PRIVATE int sqlite3Utf8ReadLimited( + const u8 *z, + int n, + u32 *piOut +){ + u32 c; + int i = 1; + assert( n>0 ); + c = z[0]; + if( c>=0xc0 ){ + c = sqlite3Utf8Trans1[c-0xc0]; + if( n>4 ) n = 4; + while( iiBusyTimeout; +#if SQLITE_ENABLE_SETLK_TIMEOUT==1 pFile->iBusyTimeout = *(int*)pArg; +#elif SQLITE_ENABLE_SETLK_TIMEOUT==2 + pFile->iBusyTimeout = !!(*(int*)pArg); +#else +# error "SQLITE_ENABLE_SETLK_TIMEOUT must be set to 1 or 2" +#endif *(int*)pArg = iOld; return SQLITE_OK; } @@ -42102,6 +42281,25 @@ static int unixGetpagesize(void){ ** Either unixShmNode.pShmMutex must be held or unixShmNode.nRef==0 and ** unixMutexHeld() is true when reading or writing any other field ** in this structure. +** +** aLock[SQLITE_SHM_NLOCK]: +** This array records the various locks held by clients on each of the +** SQLITE_SHM_NLOCK slots. If the aLock[] entry is set to 0, then no +** locks are held by the process on this slot. 
If it is set to -1, then +** some client holds an EXCLUSIVE lock on the locking slot. If the aLock[] +** value is set to a positive value, then it is the number of shared +** locks currently held on the slot. +** +** aMutex[SQLITE_SHM_NLOCK]: +** Normally, when SQLITE_ENABLE_SETLK_TIMEOUT is not defined, mutex +** pShmMutex is used to protect the aLock[] array and the right to +** call fcntl() on unixShmNode.hShm to obtain or release locks. +** +** If SQLITE_ENABLE_SETLK_TIMEOUT is defined though, we use an array +** of mutexes - one for each locking slot. To read or write locking +** slot aLock[iSlot], the caller must hold the corresponding mutex +** aMutex[iSlot]. Similarly, to call fcntl() to obtain or release a +** lock corresponding to slot iSlot, mutex aMutex[iSlot] must be held. */ struct unixShmNode { unixInodeInfo *pInode; /* unixInodeInfo that owns this SHM node */ @@ -42115,10 +42313,11 @@ struct unixShmNode { char **apRegion; /* Array of mapped shared-memory regions */ int nRef; /* Number of unixShm objects pointing to this */ unixShm *pFirst; /* All unixShm objects pointing to this */ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + sqlite3_mutex *aMutex[SQLITE_SHM_NLOCK]; +#endif int aLock[SQLITE_SHM_NLOCK]; /* # shared locks on slot, -1==excl lock */ #ifdef SQLITE_DEBUG - u8 exclMask; /* Mask of exclusive locks held */ - u8 sharedMask; /* Mask of shared locks held */ u8 nextShmId; /* Next available unixShm.id value */ #endif }; @@ -42201,16 +42400,35 @@ static int unixShmSystemLock( struct flock f; /* The posix advisory locking structure */ int rc = SQLITE_OK; /* Result code form fcntl() */ - /* Access to the unixShmNode object is serialized by the caller */ pShmNode = pFile->pInode->pShmNode; - assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) ); - assert( pShmNode->nRef>0 || unixMutexHeld() ); + + /* Assert that the parameters are within expected range and that the + ** correct mutex or mutexes are held. */ + assert( pShmNode->nRef>=0 ); + assert( (ofst==UNIX_SHM_DMS && n==1) + || (ofst>=UNIX_SHM_BASE && ofst+n<=(UNIX_SHM_BASE+SQLITE_SHM_NLOCK)) + ); + if( ofst==UNIX_SHM_DMS ){ + assert( pShmNode->nRef>0 || unixMutexHeld() ); + assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) ); + }else{ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + int ii; + for(ii=ofst-UNIX_SHM_BASE; iiaMutex[ii]) ); + } +#else + assert( sqlite3_mutex_held(pShmNode->pShmMutex) ); + assert( pShmNode->nRef>0 ); +#endif + } /* Shared locks never span more than one byte */ assert( n==1 || lockType!=F_RDLCK ); /* Locks are within range */ assert( n>=1 && n<=SQLITE_SHM_NLOCK ); + assert( ofst>=UNIX_SHM_BASE && ofst<=(UNIX_SHM_DMS+SQLITE_SHM_NLOCK) ); if( pShmNode->hShm>=0 ){ int res; @@ -42221,7 +42439,7 @@ static int unixShmSystemLock( f.l_len = n; res = osSetPosixAdvisoryLock(pShmNode->hShm, &f, pFile); if( res==-1 ){ -#ifdef SQLITE_ENABLE_SETLK_TIMEOUT +#if defined(SQLITE_ENABLE_SETLK_TIMEOUT) && SQLITE_ENABLE_SETLK_TIMEOUT==1 rc = (pFile->iBusyTimeout ? SQLITE_BUSY_TIMEOUT : SQLITE_BUSY); #else rc = SQLITE_BUSY; @@ -42229,39 +42447,28 @@ static int unixShmSystemLock( } } - /* Update the global lock state and do debug tracing */ + /* Do debug tracing */ #ifdef SQLITE_DEBUG - { u16 mask; OSTRACE(("SHM-LOCK ")); - mask = ofst>31 ? 
0xffff : (1<<(ofst+n)) - (1<exclMask &= ~mask; - pShmNode->sharedMask &= ~mask; + OSTRACE(("unlock %d..%d ok\n", ofst, ofst+n-1)); }else if( lockType==F_RDLCK ){ - OSTRACE(("read-lock %d ok", ofst)); - pShmNode->exclMask &= ~mask; - pShmNode->sharedMask |= mask; + OSTRACE(("read-lock %d..%d ok\n", ofst, ofst+n-1)); }else{ assert( lockType==F_WRLCK ); - OSTRACE(("write-lock %d ok", ofst)); - pShmNode->exclMask |= mask; - pShmNode->sharedMask &= ~mask; + OSTRACE(("write-lock %d..%d ok\n", ofst, ofst+n-1)); } }else{ if( lockType==F_UNLCK ){ - OSTRACE(("unlock %d failed", ofst)); + OSTRACE(("unlock %d..%d failed\n", ofst, ofst+n-1)); }else if( lockType==F_RDLCK ){ - OSTRACE(("read-lock failed")); + OSTRACE(("read-lock %d..%d failed\n", ofst, ofst+n-1)); }else{ assert( lockType==F_WRLCK ); - OSTRACE(("write-lock %d failed", ofst)); + OSTRACE(("write-lock %d..%d failed\n", ofst, ofst+n-1)); } } - OSTRACE((" - afterwards %03x,%03x\n", - pShmNode->sharedMask, pShmNode->exclMask)); - } #endif return rc; @@ -42298,6 +42505,11 @@ static void unixShmPurge(unixFile *pFd){ int i; assert( p->pInode==pFd->pInode ); sqlite3_mutex_free(p->pShmMutex); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + for(i=0; iaMutex[i]); + } +#endif for(i=0; inRegion; i+=nShmPerMap){ if( p->hShm>=0 ){ osMunmap(p->apRegion[i], p->szRegion); @@ -42357,7 +42569,20 @@ static int unixLockSharedMemory(unixFile *pDbFd, unixShmNode *pShmNode){ pShmNode->isUnlocked = 1; rc = SQLITE_READONLY_CANTINIT; }else{ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + /* Do not use a blocking lock here. If the lock cannot be obtained + ** immediately, it means some other connection is truncating the + ** *-shm file. And after it has done so, it will not release its + ** lock, but only downgrade it to a shared lock. So no point in + ** blocking here. The call below to obtain the shared DMS lock may + ** use a blocking lock. */ + int iSaveTimeout = pDbFd->iBusyTimeout; + pDbFd->iBusyTimeout = 0; +#endif rc = unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_DMS, 1); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + pDbFd->iBusyTimeout = iSaveTimeout; +#endif /* The first connection to attach must truncate the -shm file. We ** truncate to 3 bytes (an arbitrary small number, less than the ** -shm header size) rather than 0 as a system debugging aid, to @@ -42478,6 +42703,18 @@ static int unixOpenSharedMemory(unixFile *pDbFd){ rc = SQLITE_NOMEM_BKPT; goto shm_open_err; } +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + { + int ii; + for(ii=0; iiaMutex[ii] = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + if( pShmNode->aMutex[ii]==0 ){ + rc = SQLITE_NOMEM_BKPT; + goto shm_open_err; + } + } + } +#endif } if( pInode->bProcessLock==0 ){ @@ -42699,9 +42936,11 @@ static int unixShmMap( */ #ifdef SQLITE_DEBUG static int assertLockingArrayOk(unixShmNode *pShmNode){ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + return 1; +#else unixShm *pX; int aLock[SQLITE_SHM_NLOCK]; - assert( sqlite3_mutex_held(pShmNode->pShmMutex) ); memset(aLock, 0, sizeof(aLock)); for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ @@ -42719,13 +42958,14 @@ static int assertLockingArrayOk(unixShmNode *pShmNode){ assert( 0==memcmp(pShmNode->aLock, aLock, sizeof(aLock)) ); return (memcmp(pShmNode->aLock, aLock, sizeof(aLock))==0); +#endif } #endif /* ** Change the lock state for a shared-memory segment. ** -** Note that the relationship between SHAREd and EXCLUSIVE locks is a little +** Note that the relationship between SHARED and EXCLUSIVE locks is a little ** different here than in posix. 
In xShmLock(), one can go from unlocked ** to shared and back or from unlocked to exclusive and back. But one may ** not go from shared to exclusive or from exclusive to shared. @@ -42740,7 +42980,7 @@ static int unixShmLock( unixShm *p; /* The shared memory being locked */ unixShmNode *pShmNode; /* The underlying file iNode */ int rc = SQLITE_OK; /* Result code */ - u16 mask; /* Mask of locks to take or release */ + u16 mask = (1<<(ofst+n)) - (1<pShm; @@ -42775,88 +43015,151 @@ static int unixShmLock( ** It is not permitted to block on the RECOVER lock. */ #ifdef SQLITE_ENABLE_SETLK_TIMEOUT - assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || ( - (ofst!=2) /* not RECOVER */ - && (ofst!=1 || (p->exclMask|p->sharedMask)==0) - && (ofst!=0 || (p->exclMask|p->sharedMask)<3) - && (ofst<3 || (p->exclMask|p->sharedMask)<(1<exclMask|p->sharedMask); + assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || ( + (ofst!=2) /* not RECOVER */ + && (ofst!=1 || lockMask==0 || lockMask==2) + && (ofst!=0 || lockMask<3) + && (ofst<3 || lockMask<(1<1 || mask==(1<pShmMutex); - assert( assertLockingArrayOk(pShmNode) ); - if( flags & SQLITE_SHM_UNLOCK ){ - if( (p->exclMask|p->sharedMask) & mask ){ - int ii; - int bUnlock = 1; + /* Check if there is any work to do. There are three cases: + ** + ** a) An unlock operation where there are locks to unlock, + ** b) An shared lock where the requested lock is not already held + ** c) An exclusive lock where the requested lock is not already held + ** + ** The SQLite core never requests an exclusive lock that it already holds. + ** This is assert()ed below. + */ + assert( flags!=(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK) + || 0==(p->exclMask & mask) + ); + if( ((flags & SQLITE_SHM_UNLOCK) && ((p->exclMask|p->sharedMask) & mask)) + || (flags==(SQLITE_SHM_SHARED|SQLITE_SHM_LOCK) && 0==(p->sharedMask & mask)) + || (flags==(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK)) + ){ - for(ii=ofst; ii((p->sharedMask & (1<aMutex[iMutex]); + if( rc!=SQLITE_OK ) goto leave_shmnode_mutexes; + }else{ + sqlite3_mutex_enter(pShmNode->aMutex[iMutex]); } + } +#else + sqlite3_mutex_enter(pShmNode->pShmMutex); +#endif - if( bUnlock ){ - rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n); - if( rc==SQLITE_OK ){ - memset(&aLock[ofst], 0, sizeof(int)*n); + if( ALWAYS(rc==SQLITE_OK) ){ + if( flags & SQLITE_SHM_UNLOCK ){ + /* Case (a) - unlock. */ + int bUnlock = 1; + assert( (p->exclMask & p->sharedMask)==0 ); + assert( !(flags & SQLITE_SHM_EXCLUSIVE) || (p->exclMask & mask)==mask ); + assert( !(flags & SQLITE_SHM_SHARED) || (p->sharedMask & mask)==mask ); + + /* If this is a SHARED lock being unlocked, it is possible that other + ** clients within this process are holding the same SHARED lock. In + ** this case, set bUnlock to 0 so that the posix lock is not removed + ** from the file-descriptor below. 
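The aLock[]/aMutex[] scheme spelled out in the comments above amounts to a small state machine per locking slot: 0 means unlocked, -1 means one exclusive holder, a positive count is the number of in-process shared holders, and each slot gets its own mutex in SQLITE_ENABLE_SETLK_TIMEOUT builds. A Go sketch of just the shared-lock bookkeeping (slotLocks is invented; the exclusive path and the underlying fcntl() calls are elided):

	package main

	import (
		"errors"
		"fmt"
		"sync"
	)

	const nSlot = 8 // stand-in for SQLITE_SHM_NLOCK

	type slotLocks struct {
		mu    [nSlot]sync.Mutex // one mutex per slot, as with aMutex[]
		aLock [nSlot]int        // 0 = unlocked, -1 = exclusive, >0 = shared count
	}

	func (s *slotLocks) lockShared(i int) error {
		s.mu[i].Lock()
		defer s.mu[i].Unlock()
		if s.aLock[i] < 0 {
			return errors.New("SQLITE_BUSY") // an exclusive holder is present
		}
		s.aLock[i]++ // the first holder would take the OS-level lock here
		return nil
	}

	func (s *slotLocks) unlockShared(i int) {
		s.mu[i].Lock()
		defer s.mu[i].Unlock()
		if s.aLock[i] > 1 {
			s.aLock[i]-- // siblings still hold it: keep the OS-level lock
			return
		}
		s.aLock[i] = 0 // last in-process holder: OS-level unlock happens here
	}

	func main() {
		var s slotLocks
		_ = s.lockShared(3)
		_ = s.lockShared(3)
		s.unlockShared(3)
		fmt.Println(s.aLock[3]) // 1: a sibling connection still holds the slot
	}
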
*/ + if( flags & SQLITE_SHM_SHARED ){ + assert( n==1 ); + assert( aLock[ofst]>=1 ); + if( aLock[ofst]>1 ){ + bUnlock = 0; + aLock[ofst]--; + p->sharedMask &= ~mask; + } } - }else if( ALWAYS(p->sharedMask & (1<1 ); - aLock[ofst]--; - } - /* Undo the local locks */ - if( rc==SQLITE_OK ){ - p->exclMask &= ~mask; - p->sharedMask &= ~mask; - } - } - }else if( flags & SQLITE_SHM_SHARED ){ - assert( n==1 ); - assert( (p->exclMask & (1<sharedMask & mask)==0 ){ - if( aLock[ofst]<0 ){ - rc = SQLITE_BUSY; - }else if( aLock[ofst]==0 ){ - rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n); - } + if( bUnlock ){ + rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n); + if( rc==SQLITE_OK ){ + memset(&aLock[ofst], 0, sizeof(int)*n); + p->sharedMask &= ~mask; + p->exclMask &= ~mask; + } + } + }else if( flags & SQLITE_SHM_SHARED ){ + /* Case (b) - a shared lock. */ - /* Get the local shared locks */ - if( rc==SQLITE_OK ){ - p->sharedMask |= mask; - aLock[ofst]++; - } - } - }else{ - /* Make sure no sibling connections hold locks that will block this - ** lock. If any do, return SQLITE_BUSY right away. */ - int ii; - for(ii=ofst; iisharedMask & mask)==0 ); - if( ALWAYS((p->exclMask & (1<sharedMask |= mask; + aLock[ofst]++; + } + }else{ + /* Case (c) - an exclusive lock. */ + int ii; + + assert( flags==(SQLITE_SHM_LOCK|SQLITE_SHM_EXCLUSIVE) ); assert( (p->sharedMask & mask)==0 ); - p->exclMask |= mask; + assert( (p->exclMask & mask)==0 ); + + /* Make sure no sibling connections hold locks that will block this + ** lock. If any do, return SQLITE_BUSY right away. */ for(ii=ofst; iiexclMask |= mask; + for(ii=ofst; ii=ofst; iMutex--){ + sqlite3_mutex_leave(pShmNode->aMutex[iMutex]); + } +#else + sqlite3_mutex_leave(pShmNode->pShmMutex); +#endif } - assert( assertLockingArrayOk(pShmNode) ); - sqlite3_mutex_leave(pShmNode->pShmMutex); + OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n", p->id, osGetpid(0), p->sharedMask, p->exclMask)); return rc; @@ -43106,11 +43409,16 @@ static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ #if SQLITE_MAX_MMAP_SIZE>0 if( pFd->mmapSizeMax>0 ){ + /* Ensure that there is always at least a 256 byte buffer of addressable + ** memory following the returned page. If the database is corrupt, + ** SQLite may overread the page slightly (in practice only a few bytes, + ** but 256 is safe, round, number). */ + const int nEofBuffer = 256; if( pFd->pMapRegion==0 ){ int rc = unixMapfile(pFd, -1); if( rc!=SQLITE_OK ) return rc; } - if( pFd->mmapSize >= iOff+nAmt ){ + if( pFd->mmapSize >= (iOff+nAmt+nEofBuffer) ){ *pp = &((u8 *)pFd->pMapRegion)[iOff]; pFd->nFetchOut++; } @@ -50463,6 +50771,11 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ #if SQLITE_MAX_MMAP_SIZE>0 if( pFd->mmapSizeMax>0 ){ + /* Ensure that there is always at least a 256 byte buffer of addressable + ** memory following the returned page. If the database is corrupt, + ** SQLite may overread the page slightly (in practice only a few bytes, + ** but 256 is safe, round, number). 
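The guard this comment introduces is a one-line bounds test, applied in both the unixFetch and winFetch hunks here. A Go rendering of the same check (fetch and mapped are invented names for illustration):

	package mmapfetch

	// fetch returns a zero-copy view into an mmap'd file, or nil to tell the
	// caller to fall back to ordinary read I/O. It mirrors the
	// mmapSize >= iOff+nAmt+nEofBuffer test below: the requested range must
	// leave a spare guard region of addressable bytes before end-of-map.
	func fetch(mapped []byte, off, n int) []byte {
		const guard = 256 // cf. nEofBuffer
		if off < 0 || n < 0 || off+n+guard > len(mapped) {
			return nil // too close to EOF: read through normal I/O instead
		}
		return mapped[off : off+n]
	}
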
*/ + const int nEofBuffer = 256; if( pFd->pMapRegion==0 ){ int rc = winMapfile(pFd, -1); if( rc!=SQLITE_OK ){ @@ -50471,7 +50784,7 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ return rc; } } - if( pFd->mmapSize >= iOff+nAmt ){ + if( pFd->mmapSize >= (iOff+nAmt+nEofBuffer) ){ assert( pFd->pMapRegion!=0 ); *pp = &((u8 *)pFd->pMapRegion)[iOff]; pFd->nFetchOut++; @@ -57074,7 +57387,7 @@ struct Pager { char *zJournal; /* Name of the journal file */ int (*xBusyHandler)(void*); /* Function to call when busy */ void *pBusyHandlerArg; /* Context argument for xBusyHandler */ - int aStat[4]; /* Total cache hits, misses, writes, spills */ + u32 aStat[4]; /* Total cache hits, misses, writes, spills */ #ifdef SQLITE_TEST int nRead; /* Database pages read */ #endif @@ -57204,9 +57517,8 @@ SQLITE_PRIVATE int sqlite3PagerDirectReadOk(Pager *pPager, Pgno pgno){ #ifndef SQLITE_OMIT_WAL if( pPager->pWal ){ u32 iRead = 0; - int rc; - rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iRead); - return (rc==SQLITE_OK && iRead==0); + (void)sqlite3WalFindFrame(pPager->pWal, pgno, &iRead); + return iRead==0; } #endif return 1; @@ -61448,10 +61760,13 @@ SQLITE_PRIVATE int sqlite3PagerOpen( */ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char *zName){ Pager *pPager; + const char *p; while( zName[-1]!=0 || zName[-2]!=0 || zName[-3]!=0 || zName[-4]!=0 ){ zName--; } - pPager = *(Pager**)(zName - 4 - sizeof(Pager*)); + p = zName - 4 - sizeof(Pager*); + assert( EIGHT_BYTE_ALIGNMENT(p) ); + pPager = *(Pager**)p; return pPager->fd; } @@ -63215,11 +63530,11 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){ a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize; a[4] = pPager->eState; a[5] = pPager->errCode; - a[6] = pPager->aStat[PAGER_STAT_HIT]; - a[7] = pPager->aStat[PAGER_STAT_MISS]; + a[6] = (int)pPager->aStat[PAGER_STAT_HIT] & 0x7fffffff; + a[7] = (int)pPager->aStat[PAGER_STAT_MISS] & 0x7fffffff; a[8] = 0; /* Used to be pPager->nOvfl */ a[9] = pPager->nRead; - a[10] = pPager->aStat[PAGER_STAT_WRITE]; + a[10] = (int)pPager->aStat[PAGER_STAT_WRITE] & 0x7fffffff; return a; } #endif @@ -63235,7 +63550,7 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){ ** reset parameter is non-zero, the cache hit or miss count is zeroed before ** returning. */ -SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){ +SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, u64 *pnVal){ assert( eStat==SQLITE_DBSTATUS_CACHE_HIT || eStat==SQLITE_DBSTATUS_CACHE_MISS @@ -64175,7 +64490,7 @@ SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){ } #endif -#ifdef SQLITE_USE_SEH +#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL) SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager *pPager){ return sqlite3WalSystemErrno(pPager->pWal); } @@ -66191,6 +66506,19 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){ } #ifdef SQLITE_ENABLE_SETLK_TIMEOUT + + +/* +** Attempt to enable blocking locks that block for nMs ms. Return 1 if +** blocking locks are successfully enabled, or 0 otherwise. +*/ +static int walEnableBlockingMs(Wal *pWal, int nMs){ + int rc = sqlite3OsFileControl( + pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&nMs + ); + return (rc==SQLITE_OK); +} + /* ** Attempt to enable blocking locks. 
Blocking locks are enabled only if (a) ** they are supported by the VFS, and (b) the database handle is configured @@ -66202,11 +66530,7 @@ static int walEnableBlocking(Wal *pWal){ if( pWal->db ){ int tmout = pWal->db->busyTimeout; if( tmout ){ - int rc; - rc = sqlite3OsFileControl( - pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&tmout - ); - res = (rc==SQLITE_OK); + res = walEnableBlockingMs(pWal, tmout); } } return res; @@ -66255,20 +66579,10 @@ SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db){ pWal->db = db; } -/* -** Take an exclusive WRITE lock. Blocking if so configured. -*/ -static int walLockWriter(Wal *pWal){ - int rc; - walEnableBlocking(pWal); - rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1); - walDisableBlocking(pWal); - return rc; -} #else # define walEnableBlocking(x) 0 # define walDisableBlocking(x) -# define walLockWriter(pWal) walLockExclusive((pWal), WAL_WRITE_LOCK, 1) +# define walEnableBlockingMs(pWal, ms) 0 # define sqlite3WalDb(pWal, db) #endif /* ifdef SQLITE_ENABLE_SETLK_TIMEOUT */ @@ -66869,7 +67183,9 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){ } }else{ int bWriteLock = pWal->writeLock; - if( bWriteLock || SQLITE_OK==(rc = walLockWriter(pWal)) ){ + if( bWriteLock + || SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1)) + ){ pWal->writeLock = 1; if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){ badHdr = walIndexTryHdr(pWal, pChanged); @@ -66877,7 +67193,8 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){ /* If the wal-index header is still malformed even while holding ** a WRITE lock, it can only mean that the header is corrupted and ** needs to be reconstructed. So run recovery to do exactly that. - */ + ** Disable blocking locks first. */ + walDisableBlocking(pWal); rc = walIndexRecover(pWal); *pChanged = 1; } @@ -67087,6 +67404,37 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){ return rc; } +/* +** The final argument passed to walTryBeginRead() is of type (int*). The +** caller should invoke walTryBeginRead as follows: +** +** int cnt = 0; +** do { +** rc = walTryBeginRead(..., &cnt); +** }while( rc==WAL_RETRY ); +** +** The final value of "cnt" is of no use to the caller. It is used by +** the implementation of walTryBeginRead() as follows: +** +** + Each time walTryBeginRead() is called, it is incremented. Once +** it reaches WAL_RETRY_PROTOCOL_LIMIT - indicating that walTryBeginRead() +** has many times been invoked and failed with WAL_RETRY - walTryBeginRead() +** returns SQLITE_PROTOCOL. +** +** + If SQLITE_ENABLE_SETLK_TIMEOUT is defined and walTryBeginRead() failed +** because a blocking lock timed out (SQLITE_BUSY_TIMEOUT from the OS +** layer), the WAL_RETRY_BLOCKED_MASK bit is set in "cnt". In this case +** the next invocation of walTryBeginRead() may omit an expected call to +** sqlite3OsSleep(). There has already been a delay when the previous call +** waited on a lock. +*/ +#define WAL_RETRY_PROTOCOL_LIMIT 100 +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT +# define WAL_RETRY_BLOCKED_MASK 0x10000000 +#else +# define WAL_RETRY_BLOCKED_MASK 0 +#endif + /* ** Attempt to start a read transaction. This might fail due to a race or ** other transient condition. When that happens, it returns WAL_RETRY to @@ -67137,13 +67485,16 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){ ** so it takes care to hold an exclusive lock on the corresponding ** WAL_READ_LOCK() while changing values. 
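The retry/backoff contract described above compresses into a short routine. A Go transcription of the delay policy (retryDelayUs is an invented name; the constants and the quadratic ramp come from the comment and code above, where the 100th and final retry sleeps about 323 ms):

	package walretry

	// cf. WAL_RETRY_PROTOCOL_LIMIT and WAL_RETRY_BLOCKED_MASK above.
	const (
		retryLimit  = 100
		blockedMask = 0x10000000
	)

	// retryDelayUs is called once per WAL_RETRY. The counter lives with the
	// caller; the blocked bit means "a blocking OS lock already waited for
	// us, so skip the usual sleep this time".
	func retryDelayUs(pCnt *int) (delayUs int, giveUp bool) {
		*pCnt++
		cnt := *pCnt &^ blockedMask
		if cnt <= 5 {
			return 0, false // the first few retries spin without sleeping
		}
		if cnt > retryLimit {
			return 0, true // cf. returning SQLITE_PROTOCOL
		}
		delayUs = 1
		if cnt >= 10 {
			delayUs = (cnt - 9) * (cnt - 9) * 39 // quadratic ramp, ~323ms at 100
		}
		if *pCnt&blockedMask != 0 {
			delayUs = 1 // the blocking lock already provided the delay
		}
		*pCnt &^= blockedMask
		return delayUs, false
	}
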
*/ -static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ +static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */ u32 mxReadMark; /* Largest aReadMark[] value */ int mxI; /* Index of largest aReadMark[] value */ int i; /* Loop counter */ int rc = SQLITE_OK; /* Return code */ u32 mxFrame; /* Wal frame to lock to */ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + int nBlockTmout = 0; +#endif assert( pWal->readLock<0 ); /* Not currently locked */ @@ -67167,14 +67518,34 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ ** so that on the 100th (and last) RETRY we delay for 323 milliseconds. ** The total delay time before giving up is less than 10 seconds. */ - if( cnt>5 ){ + (*pCnt)++; + if( *pCnt>5 ){ int nDelay = 1; /* Pause time in microseconds */ - if( cnt>100 ){ + int cnt = (*pCnt & ~WAL_RETRY_BLOCKED_MASK); + if( cnt>WAL_RETRY_PROTOCOL_LIMIT ){ VVA_ONLY( pWal->lockError = 1; ) return SQLITE_PROTOCOL; } - if( cnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39; + if( *pCnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + /* In SQLITE_ENABLE_SETLK_TIMEOUT builds, configure the file-descriptor + ** to block for locks for approximately nDelay us. This affects three + ** locks: (a) the shared lock taken on the DMS slot in os_unix.c (if + ** using os_unix.c), (b) the WRITER lock taken in walIndexReadHdr() if the + ** first attempted read fails, and (c) the shared lock taken on the + ** read-mark. + ** + ** If the previous call failed due to an SQLITE_BUSY_TIMEOUT error, + ** then sleep for the minimum of 1us. The previous call already provided + ** an extra delay while it was blocking on the lock. + */ + nBlockTmout = (nDelay+998) / 1000; + if( !useWal && walEnableBlockingMs(pWal, nBlockTmout) ){ + if( *pCnt & WAL_RETRY_BLOCKED_MASK ) nDelay = 1; + } +#endif sqlite3OsSleep(pWal->pVfs, nDelay); + *pCnt &= ~WAL_RETRY_BLOCKED_MASK; } if( !useWal ){ @@ -67182,6 +67553,13 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ if( pWal->bShmUnreliable==0 ){ rc = walIndexReadHdr(pWal, pChanged); } +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + walDisableBlocking(pWal); + if( rc==SQLITE_BUSY_TIMEOUT ){ + rc = SQLITE_BUSY; + *pCnt |= WAL_RETRY_BLOCKED_MASK; + } +#endif if( rc==SQLITE_BUSY ){ /* If there is not a recovery running in another thread or process ** then convert BUSY errors to WAL_RETRY. If recovery is known to @@ -67296,9 +67674,19 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT; } + (void)walEnableBlockingMs(pWal, nBlockTmout); rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); + walDisableBlocking(pWal); if( rc ){ - return rc==SQLITE_BUSY ? WAL_RETRY : rc; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + if( rc==SQLITE_BUSY_TIMEOUT ){ + *pCnt |= WAL_RETRY_BLOCKED_MASK; + } +#else + assert( rc!=SQLITE_BUSY_TIMEOUT ); +#endif + assert( (rc&0xFF)!=SQLITE_BUSY||rc==SQLITE_BUSY||rc==SQLITE_BUSY_TIMEOUT ); + return (rc&0xFF)==SQLITE_BUSY ? 
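
The delays referenced above ("323 milliseconds" on the 100th retry, under 10 seconds in total) follow from the (cnt-9)*(cnt-9)*39 schedule in the hunk. A quick standalone check of that arithmetic:

    #include <assert.h>
    #include <stdio.h>

    int main(void){
      long long total = 0;      /* accumulated sleep, in microseconds */
      int cnt, nDelay = 0;
      for(cnt=1; cnt<=100; cnt++){
        if( cnt<=5 )      nDelay = 0;                  /* no sleep yet */
        else if( cnt<10 ) nDelay = 1;                  /* 1 microsecond */
        else              nDelay = (cnt-9)*(cnt-9)*39; /* quadratic ramp */
        total += nDelay;
      }
      printf("last=%d us total=%lld us\n", nDelay, total);
      assert( nDelay==322959 );     /* ~323 ms on the 100th retry */
      assert( total<10000000 );     /* under 10 seconds altogether */
      return 0;
    }
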
WAL_RETRY : rc; } /* Now that the read-lock has been obtained, check that neither the ** value in the aReadMark[] array or the contents of the wal-index @@ -67486,7 +67874,7 @@ static int walBeginReadTransaction(Wal *pWal, int *pChanged){ #endif do{ - rc = walTryBeginRead(pWal, pChanged, 0, ++cnt); + rc = walTryBeginRead(pWal, pChanged, 0, &cnt); }while( rc==WAL_RETRY ); testcase( (rc&0xff)==SQLITE_BUSY ); testcase( (rc&0xff)==SQLITE_IOERR ); @@ -67667,6 +68055,7 @@ static int walFindFrame( iRead = iFrame; } if( (nCollide--)==0 ){ + *piRead = 0; return SQLITE_CORRUPT_BKPT; } iKey = walNextHash(iKey); @@ -67970,7 +68359,7 @@ static int walRestartLog(Wal *pWal){ cnt = 0; do{ int notUsed; - rc = walTryBeginRead(pWal, ¬Used, 1, ++cnt); + rc = walTryBeginRead(pWal, ¬Used, 1, &cnt); }while( rc==WAL_RETRY ); assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */ testcase( (rc&0xff)==SQLITE_IOERR ); @@ -68391,10 +68780,9 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( if( pWal->readOnly ) return SQLITE_READONLY; WALTRACE(("WAL%p: checkpoint begins\n", pWal)); - /* Enable blocking locks, if possible. If blocking locks are successfully - ** enabled, set xBusy2=0 so that the busy-handler is never invoked. */ + /* Enable blocking locks, if possible. */ sqlite3WalDb(pWal, db); - (void)walEnableBlocking(pWal); + if( xBusy2 ) (void)walEnableBlocking(pWal); /* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive ** "checkpoint" lock on the database file. @@ -68435,9 +68823,14 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( /* Read the wal-index header. */ SEH_TRY { if( rc==SQLITE_OK ){ + /* For a passive checkpoint, do not re-enable blocking locks after + ** reading the wal-index header. A passive checkpoint should not block + ** or invoke the busy handler. The only lock such a checkpoint may + ** attempt to obtain is a lock on a read-slot, and it should give up + ** immediately and do a partial checkpoint if it cannot obtain it. 
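
The checkpoint changes above make SQLITE_CHECKPOINT_PASSIVE give up on a contended read-slot rather than block or invoke the busy handler. From the application side the distinction surfaces through sqlite3_wal_checkpoint_v2(); a hedged usage sketch:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Try a passive checkpoint first, then a full one. PASSIVE never
    ** blocks and may checkpoint only part of the log; FULL may wait
    ** for readers and writers (with blocking locks where available). */
    static void checkpoint_both_ways(sqlite3 *db){
      int nLog, nCkpt, rc;
      rc = sqlite3_wal_checkpoint_v2(db, "main",
               SQLITE_CHECKPOINT_PASSIVE, &nLog, &nCkpt);
      printf("passive: rc=%d log=%d ckpt=%d\n", rc, nLog, nCkpt);
      rc = sqlite3_wal_checkpoint_v2(db, "main",
               SQLITE_CHECKPOINT_FULL, &nLog, &nCkpt);
      printf("full:    rc=%d log=%d ckpt=%d\n", rc, nLog, nCkpt);
    }
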
*/ walDisableBlocking(pWal); rc = walIndexReadHdr(pWal, &isChanged); - (void)walEnableBlocking(pWal); + if( eMode2!=SQLITE_CHECKPOINT_PASSIVE ) (void)walEnableBlocking(pWal); if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){ sqlite3OsUnfetch(pWal->pDbFd, 0, 0); } @@ -68774,7 +69167,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){ ** 22 1 Min embedded payload fraction (must be 32) ** 23 1 Min leaf payload fraction (must be 32) ** 24 4 File change counter -** 28 4 Reserved for future use +** 28 4 The size of the database in pages ** 32 4 First freelist page ** 36 4 Number of freelist pages in the file ** 40 60 15 4-byte meta values passed to higher layers @@ -74901,7 +75294,6 @@ static int accessPayload( assert( aWrite>=pBufStart ); /* due to (6) */ memcpy(aSave, aWrite, 4); rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1)); - if( rc && nextPage>pBt->nPage ) rc = SQLITE_CORRUPT_BKPT; nextPage = get4byte(aWrite); memcpy(aWrite, aSave, 4); }else @@ -76021,7 +76413,10 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur){ } pPage = pCur->pPage; - assert( pPage->isInit ); + if( sqlite3FaultSim(412) ) pPage->isInit = 0; + if( !pPage->isInit ){ + return SQLITE_CORRUPT_BKPT; + } if( !pPage->leaf ){ int idx = pCur->ix; rc = moveToChild(pCur, get4byte(findCell(pPage, idx))); @@ -83412,7 +83807,7 @@ static int valueFromFunction( #endif assert( pFunc ); if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0 - || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL) + || (pFunc->funcFlags & (SQLITE_FUNC_NEEDCOLL|SQLITE_FUNC_RUNONLY))!=0 ){ return SQLITE_OK; } @@ -84136,10 +84531,11 @@ static int growOpArray(Vdbe *v, int nOp){ ** sqlite3CantopenError(lineno) */ static void test_addop_breakpoint(int pc, Op *pOp){ - static int n = 0; + static u64 n = 0; (void)pc; (void)pOp; n++; + if( n==LARGEST_UINT64 ) abort(); /* so that n is used, preventing a warning */ } #endif @@ -85324,6 +85720,10 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){ if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4); break; } + case P4_TABLEREF: { + if( db->pnBytesFreed==0 ) sqlite3DeleteTable(db, (Table*)p4); + break; + } } } @@ -85451,7 +85851,7 @@ static void SQLITE_NOINLINE vdbeChangeP4Full( int n ){ if( pOp->p4type ){ - freeP4(p->db, pOp->p4type, pOp->p4.p); + assert( pOp->p4type > P4_FREE_IF_LE ); pOp->p4type = 0; pOp->p4.p = 0; } @@ -89574,7 +89974,15 @@ SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){ int rc = SQLITE_OK; Vdbe *p = (Vdbe*)pStmt; #if SQLITE_THREADSAFE - sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex; + sqlite3_mutex *mutex; +#endif +#ifdef SQLITE_ENABLE_API_ARMOR + if( pStmt==0 ){ + return SQLITE_MISUSE_BKPT; + } +#endif +#if SQLITE_THREADSAFE + mutex = p->db->mutex; #endif sqlite3_mutex_enter(mutex); for(i=0; inVar; i++){ @@ -89953,6 +90361,18 @@ SQLITE_API void sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubt #ifdef SQLITE_ENABLE_API_ARMOR if( pCtx==0 ) return; #endif +#if defined(SQLITE_STRICT_SUBTYPE) && SQLITE_STRICT_SUBTYPE+0!=0 + if( pCtx->pFunc!=0 + && (pCtx->pFunc->funcFlags & SQLITE_RESULT_SUBTYPE)==0 + ){ + char zErr[200]; + sqlite3_snprintf(sizeof(zErr), zErr, + "misuse of sqlite3_result_subtype() by %s()", + pCtx->pFunc->zName); + sqlite3_result_error(pCtx, zErr, -1); + return; + } +#endif /* SQLITE_STRICT_SUBTYPE */ pOut = pCtx->pOut; assert( sqlite3_mutex_held(pOut->db->mutex) ); pOut->eSubtype = eSubtype & 0xff; @@ -90352,9 +90772,8 @@ SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){ SQLITE_API void 
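
Among the hunks above, the btree header comment now documents offset 28 as the in-header database size in pages; the comment catches up with behavior that has been in place for many releases. Reading the field from outside SQLite is straightforward; a sketch with only minimal error handling (it trusts the header's own claim):

    #include <stdio.h>

    /* Compute the database size implied by the file header: page count
    ** at offset 28 (big-endian u32) times page size at offset 16
    ** (big-endian u16, where the value 1 means 65536). */
    static long long db_size_from_header(const char *zFile){
      unsigned char h[100];
      unsigned long long nPage, pgsz;
      FILE *f = fopen(zFile, "rb");
      if( f==0 ) return -1;
      if( fread(h, 1, sizeof(h), f)!=sizeof(h) ){ fclose(f); return -1; }
      fclose(f);
      pgsz = ((unsigned)h[16]<<8) | h[17];
      if( pgsz==1 ) pgsz = 65536;
      nPage = ((unsigned long long)h[28]<<24) | ((unsigned)h[29]<<16)
            | ((unsigned)h[30]<<8) | h[31];
      return (long long)(nPage*pgsz);
    }
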
*sqlite3_user_data(sqlite3_context *p){ #ifdef SQLITE_ENABLE_API_ARMOR if( p==0 ) return 0; -#else - assert( p && p->pFunc ); #endif + assert( p && p->pFunc ); return p->pFunc->pUserData; } @@ -92271,11 +92690,12 @@ SQLITE_API int sqlite3_found_count = 0; ** sqlite3CantopenError(lineno) */ static void test_trace_breakpoint(int pc, Op *pOp, Vdbe *v){ - static int n = 0; + static u64 n = 0; (void)pc; (void)pOp; (void)v; n++; + if( n==LARGEST_UINT64 ) abort(); /* So that n is used, preventing a warning */ } #endif @@ -94172,7 +94592,7 @@ case OP_AddImm: { /* in1 */ pIn1 = &aMem[pOp->p1]; memAboutToChange(p, pIn1); sqlite3VdbeMemIntegerify(pIn1); - pIn1->u.i += pOp->p2; + *(u64*)&pIn1->u.i += (u64)pOp->p2; break; } @@ -100318,24 +100738,23 @@ case OP_VCheck: { /* out2 */ pOut = &aMem[pOp->p2]; sqlite3VdbeMemSetNull(pOut); /* Innocent until proven guilty */ - assert( pOp->p4type==P4_TABLE ); + assert( pOp->p4type==P4_TABLEREF ); pTab = pOp->p4.pTab; assert( pTab!=0 ); + assert( pTab->nTabRef>0 ); assert( IsVirtual(pTab) ); - assert( pTab->u.vtab.p!=0 ); + if( pTab->u.vtab.p==0 ) break; pVtab = pTab->u.vtab.p->pVtab; assert( pVtab!=0 ); pModule = pVtab->pModule; assert( pModule!=0 ); assert( pModule->iVersion>=4 ); assert( pModule->xIntegrity!=0 ); - pTab->nTabRef++; sqlite3VtabLock(pTab->u.vtab.p); assert( pOp->p1>=0 && pOp->p1nDb ); rc = pModule->xIntegrity(pVtab, db->aDb[pOp->p1].zDbSName, pTab->zName, pOp->p3, &zErr); sqlite3VtabUnlock(pTab->u.vtab.p); - sqlite3DeleteTable(db, pTab); if( rc ){ sqlite3_free(zErr); goto abort_due_to_error; @@ -100460,6 +100879,7 @@ case OP_VColumn: { /* ncycle */ const sqlite3_module *pModule; Mem *pDest; sqlite3_context sContext; + FuncDef nullFunc; VdbeCursor *pCur = p->apCsr[pOp->p1]; assert( pCur!=0 ); @@ -100477,6 +100897,9 @@ case OP_VColumn: { /* ncycle */ memset(&sContext, 0, sizeof(sContext)); sContext.pOut = pDest; sContext.enc = encoding; + nullFunc.pUserData = 0; + nullFunc.funcFlags = SQLITE_RESULT_SUBTYPE; + sContext.pFunc = &nullFunc; assert( pOp->p5==OPFLAG_NOCHNG || pOp->p5==0 ); if( pOp->p5 & OPFLAG_NOCHNG ){ sqlite3VdbeMemSetNull(pDest); @@ -100809,6 +101232,42 @@ case OP_ClrSubtype: { /* in1 */ break; } +/* Opcode: GetSubtype P1 P2 * * * +** Synopsis: r[P2] = r[P1].subtype +** +** Extract the subtype value from register P1 and write that subtype +** into register P2. If P1 has no subtype, then P1 gets a NULL. +*/ +case OP_GetSubtype: { /* in1 out2 */ + pIn1 = &aMem[pOp->p1]; + pOut = &aMem[pOp->p2]; + if( pIn1->flags & MEM_Subtype ){ + sqlite3VdbeMemSetInt64(pOut, pIn1->eSubtype); + }else{ + sqlite3VdbeMemSetNull(pOut); + } + break; +} + +/* Opcode: SetSubtype P1 P2 * * * +** Synopsis: r[P2].subtype = r[P1] +** +** Set the subtype value of register P2 to the integer from register P1. +** If P1 is NULL, clear the subtype from p2. 
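
The OP_GetSubtype/OP_SetSubtype opcodes above, together with the new SQLITE_STRICT_SUBTYPE check in sqlite3_result_subtype(), belong to one rule: a function must be registered with SQLITE_RESULT_SUBTYPE before it may attach subtypes to its results. A minimal conforming scalar function (the name "tag" and the subtype value are made up for illustration):

    #include <sqlite3.h>

    /* tag(X): pass X through with subtype 74 ('J') attached. */
    static void tagFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      sqlite3_result_value(ctx, argv[0]);
      sqlite3_result_subtype(ctx, 74);   /* legal: see flag below */
    }

    static int register_tag(sqlite3 *db){
      /* SQLITE_RESULT_SUBTYPE both authorizes sqlite3_result_subtype()
      ** and warns the planner not to satisfy calls from expression
      ** indexes, which do not preserve subtypes. */
      return sqlite3_create_function(db, "tag", 1,
                 SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_RESULT_SUBTYPE,
                 0, tagFunc, 0, 0);
    }
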
+*/ +case OP_SetSubtype: { /* in1 out2 */ + pIn1 = &aMem[pOp->p1]; + pOut = &aMem[pOp->p2]; + if( pIn1->flags & MEM_Null ){ + pOut->flags &= ~MEM_Subtype; + }else{ + assert( pIn1->flags & MEM_Int ); + pOut->flags |= MEM_Subtype; + pOut->eSubtype = (u8)(pIn1->u.i & 0xff); + } + break; +} + /* Opcode: FilterAdd P1 * P3 P4 * ** Synopsis: filter(P1) += key(P3@P4) ** @@ -105857,6 +106316,7 @@ SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr *pExpr){ assert( ExprUseYTab(pExpr) ); pExTab = pExpr->y.pTab; assert( pExTab!=0 ); + assert( n < pExTab->nCol ); if( (pExTab->tabFlags & TF_HasGenerated)!=0 && (pExTab->aCol[n].colFlags & COLFLAG_GENERATED)!=0 ){ @@ -106433,6 +106893,7 @@ static int lookupName( sqlite3RecordErrorOffsetOfExpr(pParse->db, pExpr); pParse->checkSchema = 1; pTopNC->nNcErr++; + eNewExprOp = TK_NULL; } assert( pFJMatch==0 ); @@ -106459,7 +106920,7 @@ static int lookupName( ** If a generated column is referenced, set bits for every column ** of the table. */ - if( pExpr->iColumn>=0 && pMatch!=0 ){ + if( pExpr->iColumn>=0 && cnt==1 && pMatch!=0 ){ pMatch->colUsed |= sqlite3ExprColUsed(pExpr); } @@ -106924,11 +107385,12 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ while( pNC2 && sqlite3ReferencesSrcList(pParse, pExpr, pNC2->pSrcList)==0 ){ - pExpr->op2++; + pExpr->op2 += (1 + pNC2->nNestedSelect); pNC2 = pNC2->pNext; } assert( pDef!=0 || IN_RENAME_OBJECT ); if( pNC2 && pDef ){ + pExpr->op2 += pNC2->nNestedSelect; assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg ); assert( SQLITE_FUNC_ANYORDER==NC_OrderAgg ); testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 ); @@ -107487,6 +107949,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ /* Recursively resolve names in all subqueries in the FROM clause */ + if( pOuterNC ) pOuterNC->nNestedSelect++; for(i=0; ipSrc->nSrc; i++){ SrcItem *pItem = &p->pSrc->a[i]; if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){ @@ -107511,6 +107974,9 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ } } } + if( pOuterNC && ALWAYS(pOuterNC->nNestedSelect>0) ){ + pOuterNC->nNestedSelect--; + } /* Set up the local name-context to pass to sqlite3ResolveExprNames() to ** resolve the result-set expression list. @@ -109098,9 +109564,7 @@ SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy( assert( ExprUseXList(pExpr) ); if( pExpr->x.pList==0 || NEVER(pExpr->x.pList->nExpr==0) ){ /* Ignore ORDER BY on zero-argument aggregates */ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3ExprListDelete, - pOrderBy); + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pOrderBy); return; } if( IsWindowFunc(pExpr) ){ @@ -109281,6 +109745,9 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){ if( p ) sqlite3ExprDeleteNN(db, p); } +SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3 *db, void *p){ + if( ALWAYS(p) ) sqlite3ExprDeleteNN(db, (Expr*)p); +} /* ** Clear both elements of an OnOrUsing object @@ -109306,9 +109773,7 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){ ** pExpr to the pParse->pConstExpr list with a register number of 0. 
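
Several hunks in this region replace casts like (void(*)(sqlite3*,void*))sqlite3ExprDelete with purpose-built wrappers such as sqlite3ExprDeleteGeneric(). Calling a function through a pointer of a different type is undefined behavior in C even when the ABI happens to line up, so the safe shape of the pattern looks like this (a generic sketch, not the SQLite code itself):

    #include <stdlib.h>

    typedef struct Ctx Ctx;               /* stand-in for sqlite3 */
    typedef struct Thing { int x; } Thing;

    static void thingDelete(Ctx *p, Thing *pThing){ (void)p; free(pThing); }

    /* Wrapper with exactly the signature the cleanup list stores. The
    ** cast now happens on the data pointer (well defined) instead of
    ** on the function pointer (undefined behavior). */
    static void thingDeleteGeneric(Ctx *p, void *pArg){
      thingDelete(p, (Thing*)pArg);
    }

    typedef void (*CleanupFn)(Ctx*, void*);

    static void runCleanup(Ctx *p, CleanupFn xOp, void *pArg){
      xOp(p, pArg);   /* always invoked through the declared type */
    }
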
*/ SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3ExprDelete, - pExpr); + sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr); } /* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the @@ -110114,6 +110579,9 @@ static SQLITE_NOINLINE void exprListDeleteNN(sqlite3 *db, ExprList *pList){ SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){ if( pList ) exprListDeleteNN(db, pList); } +SQLITE_PRIVATE void sqlite3ExprListDeleteGeneric(sqlite3 *db, void *pList){ + if( ALWAYS(pList) ) exprListDeleteNN(db, (ExprList*)pList); +} /* ** Return the bitwise-OR of all Expr.flags fields in the given @@ -110613,9 +111081,10 @@ SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){ case TK_COLUMN: assert( ExprUseYTab(p) ); return ExprHasProperty(p, EP_CanBeNull) || - p->y.pTab==0 || /* Reference to column of index on expression */ + NEVER(p->y.pTab==0) || /* Reference to column of index on expr */ (p->iColumn>=0 && p->y.pTab->aCol!=0 /* Possible due to prior error */ + && ALWAYS(p->iColumny.pTab->nCol) && p->y.pTab->aCol[p->iColumn].notNull==0); default: return 1; @@ -113197,8 +113666,10 @@ SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){ inReg = sqlite3ExprCodeTarget(pParse, pExpr, target); if( inReg!=target ){ u8 op; - if( ALWAYS(pExpr) - && (ExprHasProperty(pExpr,EP_Subquery) || pExpr->op==TK_REGISTER) + Expr *pX = sqlite3ExprSkipCollateAndLikely(pExpr); + testcase( pX!=pExpr ); + if( ALWAYS(pX) + && (ExprHasProperty(pX,EP_Subquery) || pX->op==TK_REGISTER) ){ op = OP_Copy; }else{ @@ -113918,8 +114389,8 @@ SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList *pA, const ExprList *pB */ SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr *pA,Expr *pB, int iTab){ return sqlite3ExprCompare(0, - sqlite3ExprSkipCollateAndLikely(pA), - sqlite3ExprSkipCollateAndLikely(pB), + sqlite3ExprSkipCollate(pA), + sqlite3ExprSkipCollate(pB), iTab); } @@ -114644,13 +115115,14 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ case TK_AGG_FUNCTION: { if( (pNC->ncFlags & NC_InAggFunc)==0 && pWalker->walkerDepth==pExpr->op2 + && pExpr->pAggInfo==0 ){ /* Check to see if pExpr is a duplicate of another aggregate ** function that is already in the pAggInfo structure */ struct AggInfo_func *pItem = pAggInfo->aFunc; for(i=0; inFunc; i++, pItem++){ - if( pItem->pFExpr==pExpr ) break; + if( NEVER(pItem->pFExpr==pExpr) ) break; if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){ break; } @@ -114693,6 +115165,8 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ }else{ pItem->bOBPayload = 1; } + pItem->bUseSubtype = + (pItem->pFunc->funcFlags & SQLITE_SUBTYPE)!=0; }else{ pItem->iOBTab = -1; } @@ -117459,9 +117933,9 @@ static void openStatTable( typedef struct StatAccum StatAccum; typedef struct StatSample StatSample; struct StatSample { - tRowcnt *anEq; /* sqlite_stat4.nEq */ tRowcnt *anDLt; /* sqlite_stat4.nDLt */ #ifdef SQLITE_ENABLE_STAT4 + tRowcnt *anEq; /* sqlite_stat4.nEq */ tRowcnt *anLt; /* sqlite_stat4.nLt */ union { i64 iRowid; /* Rowid in main table of the key */ @@ -117619,9 +118093,9 @@ static void statInit( /* Allocate the space required for the StatAccum object */ n = sizeof(*p) - + sizeof(tRowcnt)*nColUp /* StatAccum.anEq */ - + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */ + + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */ #ifdef SQLITE_ENABLE_STAT4 + n += sizeof(tRowcnt)*nColUp; /* StatAccum.anEq */ if( mxSample ){ n += 
sizeof(tRowcnt)*nColUp /* StatAccum.anLt */ + sizeof(StatSample)*(nCol+mxSample) /* StatAccum.aBest[], a[] */ @@ -117642,9 +118116,9 @@ static void statInit( p->nKeyCol = nKeyCol; p->nSkipAhead = 0; p->current.anDLt = (tRowcnt*)&p[1]; - p->current.anEq = &p->current.anDLt[nColUp]; #ifdef SQLITE_ENABLE_STAT4 + p->current.anEq = &p->current.anDLt[nColUp]; p->mxSample = p->nLimit==0 ? mxSample : 0; if( mxSample ){ u8 *pSpace; /* Allocated space not yet assigned */ @@ -117911,7 +118385,9 @@ static void statPush( if( p->nRow==0 ){ /* This is the first call to this function. Do initialization. */ +#ifdef SQLITE_ENABLE_STAT4 for(i=0; inCol; i++) p->current.anEq[i] = 1; +#endif }else{ /* Second and subsequent calls get processed here */ #ifdef SQLITE_ENABLE_STAT4 @@ -117920,15 +118396,17 @@ static void statPush( /* Update anDLt[], anLt[] and anEq[] to reflect the values that apply ** to the current row of the index. */ +#ifdef SQLITE_ENABLE_STAT4 for(i=0; icurrent.anEq[i]++; } +#endif for(i=iChng; inCol; i++){ p->current.anDLt[i]++; #ifdef SQLITE_ENABLE_STAT4 if( p->mxSample ) p->current.anLt[i] += p->current.anEq[i]; -#endif p->current.anEq[i] = 1; +#endif } } @@ -118062,7 +118540,9 @@ static void statGet( u64 iVal = (p->nRow + nDistinct - 1) / nDistinct; if( iVal==2 && p->nRow*10 <= nDistinct*11 ) iVal = 1; sqlite3_str_appendf(&sStat, " %llu", iVal); +#ifdef SQLITE_ENABLE_STAT4 assert( p->current.anEq[i] ); +#endif } sqlite3ResultStrAccum(context, &sStat); } @@ -118751,6 +119231,16 @@ static void decodeIntArray( while( z[0]!=0 && z[0]!=' ' ) z++; while( z[0]==' ' ) z++; } + + /* Set the bLowQual flag if the peak number of rows obtained + ** from a full equality match is so large that a full table scan + ** seems likely to be faster than using the index. + */ + if( aLog[0] > 66 /* Index has more than 100 rows */ + && aLog[0] <= aLog[nOut-1] /* And only a single value seen */ + ){ + pIndex->bLowQual = 1; + } } } @@ -120797,7 +121287,7 @@ SQLITE_PRIVATE void sqlite3ColumnSetExpr( */ SQLITE_PRIVATE Expr *sqlite3ColumnExpr(Table *pTab, Column *pCol){ if( pCol->iDflt==0 ) return 0; - if( NEVER(!IsOrdinaryTable(pTab)) ) return 0; + if( !IsOrdinaryTable(pTab) ) return 0; if( NEVER(pTab->u.tab.pDfltList==0) ) return 0; if( NEVER(pTab->u.tab.pDfltList->nExpriDflt) ) return 0; return pTab->u.tab.pDfltList->a[pCol->iDflt-1].pExpr; @@ -120950,6 +121440,9 @@ SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){ if( db->pnBytesFreed==0 && (--pTable->nTabRef)>0 ) return; deleteTable(db, pTable); } +SQLITE_PRIVATE void sqlite3DeleteTableGeneric(sqlite3 *db, void *pTable){ + sqlite3DeleteTable(db, (Table*)pTable); +} /* @@ -121487,7 +121980,8 @@ SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table *pTab, Column *pCol){ /* ** Clean up the data structures associated with the RETURNING clause. 
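
The new bLowQual test above is written in LogEst units, SQLite's 10*log2(x) fixed-point logarithm: aLog[0] > 66 means the index holds more than about 100 rows, and aLog[0] <= aLog[nOut-1] means a full equality match still returns essentially the whole index. A float approximation makes the unit concrete (the real sqlite3LogEst() is integer-only; link with -lm):

    #include <math.h>
    #include <stdio.h>

    /* Approximate LogEst: 10*log2(x), rounded to nearest. */
    static int logEst(double x){ return (int)(10.0*log2(x)+0.5); }

    int main(void){
      printf("logEst(2)   = %d\n", logEst(2));    /* 10 */
      printf("logEst(4)   = %d\n", logEst(4));    /* 20 */
      printf("logEst(100) = %d\n", logEst(100));  /* 66: the row bound */
      return 0;
    }
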
*/ -static void sqlite3DeleteReturning(sqlite3 *db, Returning *pRet){ +static void sqlite3DeleteReturning(sqlite3 *db, void *pArg){ + Returning *pRet = (Returning*)pArg; Hash *pHash; pHash = &(db->aDb[1].pSchema->trigHash); sqlite3HashInsert(pHash, pRet->zName, 0); @@ -121529,8 +122023,7 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){ pParse->u1.pReturning = pRet; pRet->pParse = pParse; pRet->pReturnEL = pList; - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3DeleteReturning, pRet); + sqlite3ParserAddCleanup(pParse, sqlite3DeleteReturning, pRet); testcase( pParse->earlyCleanup ); if( db->mallocFailed ) return; sqlite3_snprintf(sizeof(pRet->zName), pRet->zName, @@ -121729,7 +122222,8 @@ SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, Column *pCol){ assert( zIn!=0 ); while( zIn[0] ){ - h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff]; + u8 x = *(u8*)zIn; + h = (h<<8) + sqlite3UpperToLower[x]; zIn++; if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */ aff = SQLITE_AFF_TEXT; @@ -125594,7 +126088,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){ if( iDb<0 ) return; z = sqlite3NameFromToken(db, pObjName); if( z==0 ) return; - zDb = db->aDb[iDb].zDbSName; + zDb = pName2->n ? db->aDb[iDb].zDbSName : 0; pTab = sqlite3FindTable(db, z, zDb); if( pTab ){ reindexTable(pParse, pTab, 0); @@ -125604,6 +126098,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){ pIndex = sqlite3FindIndex(db, z, zDb); sqlite3DbFree(db, z); if( pIndex ){ + iDb = sqlite3SchemaToIndex(db, pIndex->pTable->pSchema); sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3RefillIndex(pParse, pIndex, -1); return; @@ -125769,6 +126264,9 @@ SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){ sqlite3DbFree(db, pWith); } } +SQLITE_PRIVATE void sqlite3WithDeleteGeneric(sqlite3 *db, void *pWith){ + sqlite3WithDelete(db, (With*)pWith); +} #endif /* !defined(SQLITE_OMIT_CTE) */ /************** End of build.c ***********************************************/ @@ -138994,7 +139492,8 @@ SQLITE_PRIVATE void sqlite3Pragma( if( pVTab->pModule->iVersion<4 ) continue; if( pVTab->pModule->xIntegrity==0 ) continue; sqlite3VdbeAddOp3(v, OP_VCheck, i, 3, isQuick); - sqlite3VdbeAppendP4(v, pTab, P4_TABLE); + pTab->nTabRef++; + sqlite3VdbeAppendP4(v, pTab, P4_TABLEREF); a1 = sqlite3VdbeAddOp1(v, OP_IsNull, 3); VdbeCoverage(v); integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, a1); @@ -141021,6 +141520,7 @@ static int sqlite3LockAndPrepare( assert( (rc&db->errMask)==rc ); db->busyHandler.nBusy = 0; sqlite3_mutex_leave(db->mutex); + assert( rc==SQLITE_OK || (*ppStmt)==0 ); return rc; } @@ -141418,6 +141918,9 @@ SQLITE_PRIVATE Select *sqlite3SelectNew( SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){ if( OK_IF_ALWAYS_TRUE(p) ) clearSelect(db, p, 1); } +SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3 *db, void *p){ + if( ALWAYS(p) ) clearSelect(db, (Select*)p, 1); +} /* ** Return a pointer to the right-most SELECT statement in a compound. 
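
With the P4_TABLEREF bookkeeping above, PRAGMA integrity_check can safely hand a table reference to OP_VCheck, which calls the virtual table's xIntegrity method (sqlite3_module iVersion 4). A skeletal method for a hypothetical module:

    #include <sqlite3.h>

    /* xIntegrity: return SQLITE_OK with *pzErr==0 if the table is
    ** consistent; allocate *pzErr with sqlite3_mprintf() to report
    ** corruption. mFlags is non-zero for PRAGMA quick_check. */
    static int myvtabIntegrity(
      sqlite3_vtab *pVtab,
      const char *zSchema,
      const char *zTabName,
      int mFlags,
      char **pzErr
    ){
      (void)pVtab; (void)mFlags;
      if( 0 /* replace with real invariant checks */ ){
        *pzErr = sqlite3_mprintf("malformed shadow tables for %s.%s",
                                 zSchema, zTabName);
      }
      return SQLITE_OK;
    }
    /* In the module definition: .iVersion = 4, .xIntegrity = myvtabIntegrity */
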
@@ -143553,7 +144056,8 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( NameContext sNC; assert( pSelect!=0 ); - assert( (pSelect->selFlags & SF_Resolved)!=0 ); + testcase( (pSelect->selFlags & SF_Resolved)==0 ); + assert( (pSelect->selFlags & SF_Resolved)!=0 || IN_RENAME_OBJECT ); assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 ); assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB ); if( db->mallocFailed || IN_RENAME_OBJECT ) return; @@ -144437,9 +144941,7 @@ static int multiSelect( pDest->iSdst = dest.iSdst; pDest->nSdst = dest.nSdst; if( pDelete ){ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3SelectDelete, - pDelete); + sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pDelete); } return rc; } @@ -144990,8 +145492,7 @@ static int multiSelectOrderBy( /* Make arrangements to free the 2nd and subsequent arms of the compound ** after the parse has finished */ if( pSplit->pPrior ){ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3SelectDelete, pSplit->pPrior); + sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pSplit->pPrior); } pSplit->pPrior = pPrior; pPrior->pNext = pSplit; @@ -145812,9 +146313,7 @@ static int flattenSubquery( Table *pTabToDel = pSubitem->pTab; if( pTabToDel->nTabRef==1 ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); - sqlite3ParserAddCleanup(pToplevel, - (void(*)(sqlite3*,void*))sqlite3DeleteTable, - pTabToDel); + sqlite3ParserAddCleanup(pToplevel, sqlite3DeleteTableGeneric, pTabToDel); testcase( pToplevel->earlyCleanup ); }else{ pTabToDel->nTabRef--; @@ -146861,8 +147360,7 @@ static struct Cte *searchWith( SQLITE_PRIVATE With *sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){ if( pWith ){ if( bFree ){ - pWith = (With*)sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3WithDelete, + pWith = (With*)sqlite3ParserAddCleanup(pParse, sqlite3WithDeleteGeneric, pWith); if( pWith==0 ) return 0; } @@ -147606,10 +148104,11 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ SrcList *pTabList; SrcItem *pFrom; - assert( p->selFlags & SF_Resolved ); if( p->selFlags & SF_HasTypeInfo ) return; p->selFlags |= SF_HasTypeInfo; pParse = pWalker->pParse; + testcase( (p->selFlags & SF_Resolved)==0 ); + assert( (p->selFlags & SF_Resolved) || IN_RENAME_OBJECT ); pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ Table *pTab = pFrom->pTab; @@ -147894,6 +148393,7 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ assert( pFunc->pFExpr->pLeft!=0 ); assert( pFunc->pFExpr->pLeft->op==TK_ORDER ); assert( ExprUseXList(pFunc->pFExpr->pLeft) ); + assert( pFunc->pFunc!=0 ); pOBList = pFunc->pFExpr->pLeft->x.pList; if( !pFunc->bOBUnique ){ nExtra++; /* One extra column for the OP_Sequence */ @@ -147903,6 +148403,9 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ assert( ExprUseXList(pFunc->pFExpr) ); nExtra += pFunc->pFExpr->x.pList->nExpr; } + if( pFunc->bUseSubtype ){ + nExtra += pFunc->pFExpr->x.pList->nExpr; + } pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pOBList, 0, nExtra); if( !pFunc->bOBUnique && pParse->nErr==0 ){ pKeyInfo->nKeyField++; @@ -147929,9 +148432,9 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ assert( ExprUseXList(pF->pFExpr) ); pList = pF->pFExpr->x.pList; if( pF->iOBTab>=0 ){ - /* For an ORDER BY aggregate, calls to OP_AggStep where deferred and - ** all content was stored in emphermal table pF->iOBTab. 
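
The aggregate ORDER BY machinery touched below buffers function arguments in an ephemeral table, then replays them through OP_AggStep in the requested order. At the SQL level this is the ORDER BY-inside-aggregate syntax; a small end-to-end sketch:

    #include <sqlite3.h>
    #include <stdio.h>

    static int show(void *u, int n, char **v, char **c){
      (void)u; (void)n; (void)c;
      printf("%s\n", v[0] ? v[0] : "NULL");
      return 0;
    }

    int main(void){
      sqlite3 *db;
      if( sqlite3_open(":memory:", &db) ) return 1;
      sqlite3_exec(db,
        "CREATE TABLE t(grp, val, seq);"
        "INSERT INTO t VALUES('a','x',2),('a','y',1),('b','z',1);"
        /* ORDER BY controls the order of the aggregate's inputs */
        "SELECT group_concat(val ORDER BY seq) FROM t GROUP BY grp;",
        show, 0, 0);
      sqlite3_close(db);
      return 0;       /* prints: y,x  then  z */
    }
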
Extract that - ** content now (in ORDER BY order) and make all calls to OP_AggStep + /* For an ORDER BY aggregate, calls to OP_AggStep were deferred. Inputs + ** were stored in emphermal table pF->iOBTab. Here, we extract those + ** inputs (in ORDER BY order) and make all calls to OP_AggStep ** before doing the OP_AggFinal call. */ int iTop; /* Start of loop for extracting columns */ int nArg; /* Number of columns to extract */ @@ -147939,6 +148442,7 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ int regAgg; /* Extract into this array */ int j; /* Loop counter */ + assert( pF->pFunc!=0 ); nArg = pList->nExpr; regAgg = sqlite3GetTempRange(pParse, nArg); @@ -147955,6 +148459,15 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ for(j=nArg-1; j>=0; j--){ sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, nKey+j, regAgg+j); } + if( pF->bUseSubtype ){ + int regSubtype = sqlite3GetTempReg(pParse); + int iBaseCol = nKey + nArg + (pF->bOBPayload==0 && pF->bOBUnique==0); + for(j=nArg-1; j>=0; j--){ + sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, iBaseCol+j, regSubtype); + sqlite3VdbeAddOp2(v, OP_SetSubtype, regSubtype, regAgg+j); + } + sqlite3ReleaseTempReg(pParse, regSubtype); + } sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); sqlite3VdbeChangeP5(v, (u8)nArg); @@ -148009,6 +148522,7 @@ static void updateAccumulator( ExprList *pList; assert( ExprUseXList(pF->pFExpr) ); assert( !IsWindowFunc(pF->pFExpr) ); + assert( pF->pFunc!=0 ); pList = pF->pFExpr->x.pList; if( ExprHasProperty(pF->pFExpr, EP_WinFunc) ){ Expr *pFilter = pF->pFExpr->y.pWin->pFilter; @@ -148053,6 +148567,9 @@ static void updateAccumulator( if( pF->bOBPayload ){ regAggSz += nArg; } + if( pF->bUseSubtype ){ + regAggSz += nArg; + } regAggSz++; /* One extra register to hold result of MakeRecord */ regAgg = sqlite3GetTempRange(pParse, regAggSz); regDistinct = regAgg; @@ -148065,6 +148582,14 @@ static void updateAccumulator( if( pF->bOBPayload ){ regDistinct = regAgg+jj; sqlite3ExprCodeExprList(pParse, pList, regDistinct, 0, SQLITE_ECEL_DUP); + jj += nArg; + } + if( pF->bUseSubtype ){ + int kk; + int regBase = pF->bOBPayload ? regDistinct : regAgg; + for(kk=0; kknExpr; @@ -148269,7 +148794,8 @@ static SrcItem *isSelfJoinView( /* ** Deallocate a single AggInfo object */ -static void agginfoFree(sqlite3 *db, AggInfo *p){ +static void agginfoFree(sqlite3 *db, void *pArg){ + AggInfo *p = (AggInfo*)pArg; sqlite3DbFree(db, p->aCol); sqlite3DbFree(db, p->aFunc); sqlite3DbFreeNN(db, p); @@ -148343,7 +148869,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ pSub->selFlags |= SF_Aggregate; pSub->selFlags &= ~SF_Compound; pSub->nSelectRow = 0; - sqlite3ExprListDelete(db, pSub->pEList); + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pSub->pEList); pTerm = pPrior ? 
sqlite3ExprDup(db, pCount, 0) : pCount; pSub->pEList = sqlite3ExprListAppend(pParse, 0, pTerm); pTerm = sqlite3PExpr(pParse, TK_SELECT, 0, 0); @@ -148523,9 +149049,8 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY"); } #endif - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3ExprListDelete, - p->pOrderBy); + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, + p->pOrderBy); testcase( pParse->earlyCleanup ); p->pOrderBy = 0; } @@ -148631,6 +149156,7 @@ SQLITE_PRIVATE int sqlite3Select( TREETRACE(0x1000,pParse,p, ("LEFT-JOIN simplifies to JOIN on term %d\n",i)); pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER); + unsetJoinExpr(p->pWhere, pItem->iCursor, 0); } } if( pItem->fg.jointype & JT_LTORJ ){ @@ -148645,17 +149171,15 @@ SQLITE_PRIVATE int sqlite3Select( TREETRACE(0x1000,pParse,p, ("RIGHT-JOIN simplifies to JOIN on term %d\n",j)); pI2->fg.jointype &= ~(JT_RIGHT|JT_OUTER); + unsetJoinExpr(p->pWhere, pI2->iCursor, 1); } } } - for(j=pTabList->nSrc-1; j>=i; j--){ + for(j=pTabList->nSrc-1; j>=0; j--){ pTabList->a[j].fg.jointype &= ~JT_LTORJ; if( pTabList->a[j].fg.jointype & JT_RIGHT ) break; } } - assert( pItem->iCursor>=0 ); - unsetJoinExpr(p->pWhere, pItem->iCursor, - pTabList->a[0].fg.jointype & JT_LTORJ); } /* No further action if this term of the FROM clause is not a subquery */ @@ -148718,9 +149242,8 @@ SQLITE_PRIVATE int sqlite3Select( ){ TREETRACE(0x800,pParse,p, ("omit superfluous ORDER BY on %r FROM-clause subquery\n",i+1)); - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3ExprListDelete, - pSub->pOrderBy); + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, + pSub->pOrderBy); pSub->pOrderBy = 0; } @@ -149249,8 +149772,7 @@ SQLITE_PRIVATE int sqlite3Select( */ pAggInfo = sqlite3DbMallocZero(db, sizeof(*pAggInfo) ); if( pAggInfo ){ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))agginfoFree, pAggInfo); + sqlite3ParserAddCleanup(pParse, agginfoFree, pAggInfo); testcase( pParse->earlyCleanup ); } if( db->mallocFailed ){ @@ -153899,7 +154421,6 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){ if( p ){ db->pDisconnect = 0; - sqlite3ExpirePreparedStatements(db, 0); do { VTable *pNext = p->pNext; sqlite3VtabUnlock(p); @@ -155465,7 +155986,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet*,int); #ifdef WHERETRACE_ENABLED SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC); SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm); -SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC); +SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC); #endif SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm( WhereClause *pWC, /* The WHERE clause to be searched */ @@ -160927,12 +161448,22 @@ static void translateColumnToCopy( for(; iStartp1!=iTabCur ) continue; if( pOp->opcode==OP_Column ){ +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("TRANSLATE OP_Column to OP_Copy at %d\n", iStart); + } +#endif pOp->opcode = OP_Copy; pOp->p1 = pOp->p2 + iRegister; pOp->p2 = pOp->p3; pOp->p3 = 0; pOp->p5 = 2; /* Cause the MEM_Subtype flag to be cleared */ }else if( pOp->opcode==OP_Rowid ){ +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("TRANSLATE OP_Rowid to OP_Sequence at %d\n", iStart); + } +#endif pOp->opcode = OP_Sequence; pOp->p1 = iAutoidxCur; #ifdef SQLITE_ALLOW_ROWID_IN_VIEW @@ -162259,7 +162790,8 @@ static int whereRangeScanEst( ** sample, then 
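
The join-simplification hunks above now call unsetJoinExpr() as soon as a LEFT or RIGHT JOIN is demoted, clearing the outer-join markers on WHERE terms. Whether the demotion fires can be probed with EXPLAIN QUERY PLAN; a hedged sketch:

    #include <sqlite3.h>
    #include <stdio.h>

    static int show(void *u, int n, char **v, char **c){
      (void)u; (void)c;
      printf("%s\n", v[n-1]);      /* last column is the plan detail */
      return 0;
    }

    int main(void){
      sqlite3 *db;
      if( sqlite3_open(":memory:", &db) ) return 1;
      sqlite3_exec(db, "CREATE TABLE t1(a); CREATE TABLE t2(a,b);", 0,0,0);
      /* t2.b>0 is false for the all-NULL row a LEFT JOIN would add,
      ** so the planner may treat this as a plain inner join. */
      sqlite3_exec(db,
        "EXPLAIN QUERY PLAN "
        "SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a WHERE t2.b>0;",
        show, 0, 0);
      sqlite3_close(db);
      return 0;
    }
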
assume they are 4x more selective. This brings ** the estimated selectivity more in line with what it would be ** if estimated without the use of STAT4 tables. */ - if( iLwrIdx==iUprIdx ) nNew -= 20; assert( 20==sqlite3LogEst(4) ); + if( iLwrIdx==iUprIdx ){ nNew -= 20; } + assert( 20==sqlite3LogEst(4) ); }else{ nNew = 10; assert( 10==sqlite3LogEst(2) ); } @@ -162483,17 +163015,34 @@ SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){ #ifdef WHERETRACE_ENABLED /* ** Print a WhereLoop object for debugging purposes -*/ -SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){ - WhereInfo *pWInfo = pWC->pWInfo; - int nb = 1+(pWInfo->pTabList->nSrc+3)/4; - SrcItem *pItem = pWInfo->pTabList->a + p->iTab; - Table *pTab = pItem->pTab; - Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; - sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, - p->iTab, nb, p->maskSelf, nb, p->prereq & mAll); - sqlite3DebugPrintf(" %12s", - pItem->zAlias ? pItem->zAlias : pTab->zName); +** +** Format example: +** +** .--- Position in WHERE clause rSetup, rRun, nOut ---. +** | | +** | .--- selfMask nTerm ------. | +** | | | | +** | | .-- prereq Idx wsFlags----. | | +** | | | Name | | | +** | | | __|__ nEq ---. ___|__ | __|__ +** | / \ / \ / \ | / \ / \ / \ +** 1.002.001 t2.t2xy 2 f 010241 N 2 cost 0,56,31 +*/ +SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC){ + if( pWC ){ + WhereInfo *pWInfo = pWC->pWInfo; + int nb = 1+(pWInfo->pTabList->nSrc+3)/4; + SrcItem *pItem = pWInfo->pTabList->a + p->iTab; + Table *pTab = pItem->pTab; + Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; + sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, + p->iTab, nb, p->maskSelf, nb, p->prereq & mAll); + sqlite3DebugPrintf(" %12s", + pItem->zAlias ? pItem->zAlias : pTab->zName); + }else{ + sqlite3DebugPrintf("%c%2d.%03llx.%03llx %c%d", + p->cId, p->iTab, p->maskSelf, p->prereq & 0xfff, p->cId, p->iTab); + } if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){ const char *zName; if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){ @@ -162530,6 +163079,15 @@ SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){ } } } +SQLITE_PRIVATE void sqlite3ShowWhereLoop(const WhereLoop *p){ + if( p ) sqlite3WhereLoopPrint(p, 0); +} +SQLITE_PRIVATE void sqlite3ShowWhereLoopList(const WhereLoop *p){ + while( p ){ + sqlite3ShowWhereLoop(p); + p = p->pNextLoop; + } +} #endif /* @@ -162642,46 +163200,60 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ } /* -** Return TRUE if all of the following are true: +** Return TRUE if X is a proper subset of Y but is of equal or less cost. +** In other words, return true if all constraints of X are also part of Y +** and Y has additional constraints that might speed the search that X lacks +** but the cost of running X is not more than the cost of running Y. +** +** In other words, return true if the cost relationwship between X and Y +** is inverted and needs to be adjusted. +** +** Case 1: ** -** (1) X has the same or lower cost, or returns the same or fewer rows, -** than Y. -** (2) X uses fewer WHERE clause terms than Y -** (3) Every WHERE clause term used by X is also used by Y -** (4) X skips at least as many columns as Y -** (5) If X is a covering index, than Y is too +** (1a) X and Y use the same index. +** (1b) X has fewer == terms than Y +** (1c) Neither X nor Y use skip-scan +** (1d) X does not have a a greater cost than Y ** -** Conditions (2) and (3) mean that X is a "proper subset" of Y. 
-** If X is a proper subset of Y then Y is a better choice and ought -** to have a lower cost. This routine returns TRUE when that cost -** relationship is inverted and needs to be adjusted. Constraint (4) -** was added because if X uses skip-scan less than Y it still might -** deserve a lower cost even if it is a proper subset of Y. Constraint (5) -** was added because a covering index probably deserves to have a lower cost -** than a non-covering index even if it is a proper subset. +** Case 2: +** +** (2a) X has the same or lower cost, or returns the same or fewer rows, +** than Y. +** (2b) X uses fewer WHERE clause terms than Y +** (2c) Every WHERE clause term used by X is also used by Y +** (2d) X skips at least as many columns as Y +** (2e) If X is a covering index, than Y is too */ static int whereLoopCheaperProperSubset( const WhereLoop *pX, /* First WhereLoop to compare */ const WhereLoop *pY /* Compare against this WhereLoop */ ){ int i, j; + if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0; /* (1d) and (2a) */ + assert( (pX->wsFlags & WHERE_VIRTUALTABLE)==0 ); + assert( (pY->wsFlags & WHERE_VIRTUALTABLE)==0 ); + if( pX->u.btree.nEq < pY->u.btree.nEq /* (1b) */ + && pX->u.btree.pIndex==pY->u.btree.pIndex /* (1a) */ + && pX->nSkip==0 && pY->nSkip==0 /* (1c) */ + ){ + return 1; /* Case 1 is true */ + } if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){ - return 0; /* X is not a subset of Y */ + return 0; /* (2b) */ } - if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0; - if( pY->nSkip > pX->nSkip ) return 0; + if( pY->nSkip > pX->nSkip ) return 0; /* (2d) */ for(i=pX->nLTerm-1; i>=0; i--){ if( pX->aLTerm[i]==0 ) continue; for(j=pY->nLTerm-1; j>=0; j--){ if( pY->aLTerm[j]==pX->aLTerm[i] ) break; } - if( j<0 ) return 0; /* X not a subset of Y since term X[i] not used by Y */ + if( j<0 ) return 0; /* (2c) */ } if( (pX->wsFlags&WHERE_IDX_ONLY)!=0 && (pY->wsFlags&WHERE_IDX_ONLY)==0 ){ - return 0; /* Constraint (5) */ + return 0; /* (2e) */ } - return 1; /* All conditions meet */ + return 1; /* Case 2 is true */ } /* @@ -163171,7 +163743,10 @@ static int whereLoopAddBtreeIndex( assert( pNew->u.btree.nBtm==0 ); opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS; } - if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); + if( pProbe->bUnordered || pProbe->bLowQual ){ + if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); + if( pProbe->bLowQual ) opMask &= ~(WO_EQ|WO_IN|WO_IS); + } assert( pNew->u.btree.nEqnColumn ); assert( pNew->u.btree.nEqnKeyCol @@ -166059,6 +166634,20 @@ static SQLITE_NOINLINE void whereAddIndexedExpr( continue; } if( sqlite3ExprIsConstant(pExpr) ) continue; + if( pExpr->op==TK_FUNCTION ){ + /* Functions that might set a subtype should not be replaced by the + ** value taken from an expression index since the index omits the + ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */ + int n; + FuncDef *pDef; + sqlite3 *db = pParse->db; + assert( ExprUseXList(pExpr) ); + n = pExpr->x.pList ? 
pExpr->x.pList->nExpr : 0; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ + continue; + } + } p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr)); if( p==0 ) break; p->pIENext = pParse->pIdxEpr; @@ -166237,7 +166826,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( /* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */ testcase( pOrderBy && pOrderBy->nExpr==BMS-1 ); - if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0; + if( pOrderBy && pOrderBy->nExpr>=BMS ){ + pOrderBy = 0; + wctrlFlags &= ~WHERE_WANT_DISTINCT; + } /* The number of tables in the FROM clause is limited by the number of ** bits in a Bitmask @@ -166262,7 +166854,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ** field (type Bitmask) it must be aligned on an 8-byte boundary on ** some architectures. Hence the ROUND8() below. */ - nByteWInfo = ROUND8P(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel)); + nByteWInfo = ROUND8P(sizeof(WhereInfo)); + if( nTabList>1 ){ + nByteWInfo = ROUND8P(nByteWInfo + (nTabList-1)*sizeof(WhereLevel)); + } pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop)); if( db->mallocFailed ){ sqlite3DbFree(db, pWInfo); @@ -166824,6 +167419,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( pParse->nQueryLoop = pWInfo->savedNQueryLoop; whereInfoFree(db, pWInfo); } +#ifdef WHERETRACE_ENABLED + /* Prevent harmless compiler warnings about debugging routines + ** being declared but never used */ + sqlite3ShowWhereLoopList(0); +#endif /* WHERETRACE_ENABLED */ return 0; } @@ -168241,7 +168841,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ assert( ExprUseXList(pWin->pOwner) ); assert( pWin->pWFunc!=0 ); pArgs = pWin->pOwner->x.pList; - if( pWin->pWFunc->funcFlags & SQLITE_FUNC_SUBTYPE ){ + if( pWin->pWFunc->funcFlags & SQLITE_SUBTYPE ){ selectWindowRewriteEList(pParse, pMWin, pSrc, pArgs, pTab, &pSublist); pWin->iArgCol = (pSublist ? pSublist->nExpr : 0); pWin->bExprArgs = 1; @@ -179415,7 +180015,7 @@ SQLITE_PRIVATE int sqlite3CreateFunc( assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC ); assert( SQLITE_FUNC_DIRECT==SQLITE_DIRECTONLY ); extraFlags = enc & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY| - SQLITE_SUBTYPE|SQLITE_INNOCUOUS); + SQLITE_SUBTYPE|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE); enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY); /* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. But @@ -182160,6 +182760,28 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } #endif + + /* sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff); + ** + ** Activate or deactivate validation of JSONB that is generated from + ** text. Off by default, as the validation is slow. Validation is + ** only available if compiled using SQLITE_DEBUG. + ** + ** If onOff is initially 1, then turn it on. If onOff is initially + ** off, turn it off. If onOff is initially -1, then change onOff + ** to be the current setting. 
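
SQLITE_TESTCTRL_JSON_SELFCHECK, introduced just above, toggles validation of generated JSONB in SQLITE_DEBUG builds (elsewhere the case compiles down to a no-op). Usage follows the query-then-set convention described in the comment:

    #include <sqlite3.h>
    #include <stdio.h>

    static void json_selfcheck_on(void){
      int onOff = -1;   /* -1 asks for the current setting */
      sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff);
      printf("json selfcheck was %d\n", onOff);
      onOff = 1;        /* now switch validation on */
      sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff);
    }
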
+ */ + case SQLITE_TESTCTRL_JSON_SELFCHECK: { +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD) + int *pOnOff = va_arg(ap, int*); + if( *pOnOff<0 ){ + *pOnOff = sqlite3Config.bJsonSelfcheck; + }else{ + sqlite3Config.bJsonSelfcheck = (u8)((*pOnOff)&0xff); + } +#endif + break; + } } va_end(ap); #endif /* SQLITE_UNTESTABLE */ @@ -184144,6 +184766,8 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int); SQLITE_PRIVATE int sqlite3Fts3ExprIterate(Fts3Expr*, int (*x)(Fts3Expr*,int,void*), void*); +SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk); + #endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */ #endif /* _FTSINT_H */ @@ -187866,7 +188490,7 @@ static int fts3ShadowName(const char *zName){ ** Implementation of the xIntegrity() method on the FTS3/FTS4 virtual ** table. */ -static int fts3Integrity( +static int fts3IntegrityMethod( sqlite3_vtab *pVtab, /* The virtual table to be checked */ const char *zSchema, /* Name of schema in which pVtab lives */ const char *zTabname, /* Name of the pVTab table */ @@ -187874,30 +188498,21 @@ static int fts3Integrity( char **pzErr /* Write error message here */ ){ Fts3Table *p = (Fts3Table*)pVtab; - char *zSql; int rc; - char *zErr = 0; + int bOk = 0; - assert( pzErr!=0 ); - assert( *pzErr==0 ); UNUSED_PARAMETER(isQuick); - zSql = sqlite3_mprintf( - "INSERT INTO \"%w\".\"%w\"(\"%w\") VALUES('integrity-check');", - zSchema, zTabname, zTabname); - if( zSql==0 ){ - return SQLITE_NOMEM; - } - rc = sqlite3_exec(p->db, zSql, 0, 0, &zErr); - sqlite3_free(zSql); - if( (rc&0xff)==SQLITE_CORRUPT ){ - *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s", - p->bFts4 ? 4 : 3, zSchema, zTabname); - }else if( rc!=SQLITE_OK ){ + rc = sqlite3Fts3IntegrityCheck(p, &bOk); + assert( rc!=SQLITE_CORRUPT_VTAB || bOk==0 ); + if( rc!=SQLITE_OK && rc!=SQLITE_CORRUPT_VTAB ){ *pzErr = sqlite3_mprintf("unable to validate the inverted index for" " FTS%d table %s.%s: %s", - p->bFts4 ? 4 : 3, zSchema, zTabname, zErr); + p->bFts4 ? 4 : 3, zSchema, zTabname, sqlite3_errstr(rc)); + }else if( bOk==0 ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s", + p->bFts4 ? 4 : 3, zSchema, zTabname); } - sqlite3_free(zErr); + sqlite3Fts3SegmentsClose(p); return SQLITE_OK; } @@ -187928,7 +188543,7 @@ static const sqlite3_module fts3Module = { /* xRelease */ fts3ReleaseMethod, /* xRollbackTo */ fts3RollbackToMethod, /* xShadowName */ fts3ShadowName, - /* xIntegrity */ fts3Integrity, + /* xIntegrity */ fts3IntegrityMethod, }; /* @@ -199482,7 +200097,7 @@ static u64 fts3ChecksumIndex( ** If an error occurs (e.g. an OOM or IO error), return an SQLite error ** code. The final value of *pbOk is undefined in this case. 
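
fts3IntegrityMethod() above now calls the checksum routine directly instead of round-tripping an SQL statement through sqlite3_exec(). The user-visible entry points are unchanged; both of these exercise the same check (the table name "ft" is illustrative):

    #include <sqlite3.h>

    static int check_fts(sqlite3 *db, char **pzErr){
      /* 1. The long-standing FTS special command... */
      int rc = sqlite3_exec(db,
          "INSERT INTO ft(ft) VALUES('integrity-check');", 0, 0, pzErr);
      /* 2. ...and PRAGMA integrity_check, which now reaches the
      ** xIntegrity method of FTS3/FTS4 (and other) virtual tables. */
      if( rc==SQLITE_OK ){
        rc = sqlite3_exec(db, "PRAGMA integrity_check;", 0, 0, pzErr);
      }
      return rc;
    }
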
*/ -static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){ +SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk){ int rc = SQLITE_OK; /* Return code */ u64 cksum1 = 0; /* Checksum based on FTS index contents */ u64 cksum2 = 0; /* Checksum based on %_content contents */ @@ -199560,7 +200175,7 @@ static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){ sqlite3_finalize(pStmt); } - *pbOk = (cksum1==cksum2); + *pbOk = (rc==SQLITE_OK && cksum1==cksum2); return rc; } @@ -199600,7 +200215,7 @@ static int fts3DoIntegrityCheck( ){ int rc; int bOk = 0; - rc = fts3IntegrityCheck(p, &bOk); + rc = sqlite3Fts3IntegrityCheck(p, &bOk); if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB; return rc; } @@ -202574,24 +203189,145 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** ****************************************************************************** ** -** This SQLite JSON functions. +** SQLite JSON functions. ** ** This file began as an extension in ext/misc/json1.c in 2015. That ** extension proved so useful that it has now been moved into the core. ** -** For the time being, all JSON is stored as pure text. (We might add -** a JSONB type in the future which stores a binary encoding of JSON in -** a BLOB, but there is no support for JSONB in the current implementation. -** This implementation parses JSON text at 250 MB/s, so it is hard to see -** how JSONB might improve on that.) +** The original design stored all JSON as pure text, canonical RFC-8259. +** Support for JSON-5 extensions was added with version 3.42.0 (2023-05-16). +** All generated JSON text still conforms strictly to RFC-8259, but text +** with JSON-5 extensions is accepted as input. +** +** Beginning with version 3.45.0 (circa 2024-01-01), these routines also +** accept BLOB values that have JSON encoded using a binary representation +** called "JSONB". The name JSONB comes from PostgreSQL, however the on-disk +** format SQLite JSONB is completely different and incompatible with +** PostgreSQL JSONB. +** +** Decoding and interpreting JSONB is still O(N) where N is the size of +** the input, the same as text JSON. However, the constant of proportionality +** for JSONB is much smaller due to faster parsing. The size of each +** element in JSONB is encoded in its header, so there is no need to search +** for delimiters using persnickety syntax rules. JSONB seems to be about +** 3x faster than text JSON as a result. JSONB is also tends to be slightly +** smaller than text JSON, by 5% or 10%, but there are corner cases where +** JSONB can be slightly larger. So you are not far mistaken to say that +** a JSONB blob is the same size as the equivalent RFC-8259 text. +** +** +** THE JSONB ENCODING: +** +** Every JSON element is encoded in JSONB as a header and a payload. +** The header is between 1 and 9 bytes in size. The payload is zero +** or more bytes. +** +** The lower 4 bits of the first byte of the header determines the +** element type: +** +** 0: NULL +** 1: TRUE +** 2: FALSE +** 3: INT -- RFC-8259 integer literal +** 4: INT5 -- JSON5 integer literal +** 5: FLOAT -- RFC-8259 floating point literal +** 6: FLOAT5 -- JSON5 floating point literal +** 7: TEXT -- Text literal acceptable to both SQL and JSON +** 8: TEXTJ -- Text containing RFC-8259 escapes +** 9: TEXT5 -- Text containing JSON5 and/or RFC-8259 escapes +** 10: TEXTRAW -- Text containing unescaped syntax characters +** 11: ARRAY +** 12: OBJECT +** +** The other three possible values (13-15) are reserved for future +** enhancements. 
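
At the SQL level, the JSONB format described above is produced by the new jsonb*() function family, while every json function accepts either encoding. A sketch of converting a text column in place (table and column names are hypothetical):

    #include <sqlite3.h>

    /* Re-encode text JSON as JSONB, trading readability for parse
    ** speed and a usually slightly smaller encoding. */
    static int to_jsonb(sqlite3 *db){
      return sqlite3_exec(db,
          "UPDATE docs SET body = jsonb(body) "
          " WHERE typeof(body)='text' AND json_valid(body);",
          0, 0, 0);
    }
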
+** +** The upper 4 bits of the first byte determine the size of the header +** and sometimes also the size of the payload. If X is the first byte +** of the element and if X>>4 is between 0 and 11, then the payload +** will be that many bytes in size and the header is exactly one byte +** in size. Other four values for X>>4 (12-15) indicate that the header +** is more than one byte in size and that the payload size is determined +** by the remainder of the header, interpreted as a unsigned big-endian +** integer. +** +** Value of X>>4 Size integer Total header size +** ------------- -------------------- ----------------- +** 12 1 byte (0-255) 2 +** 13 2 byte (0-65535) 3 +** 14 4 byte (0-4294967295) 5 +** 15 8 byte (0-1.8e19) 9 +** +** The payload size need not be expressed in its minimal form. For example, +** if the payload size is 10, the size can be expressed in any of 5 different +** ways: (1) (X>>4)==10, (2) (X>>4)==12 following by on 0x0a byte, +** (3) (X>>4)==13 followed by 0x00 and 0x0a, (4) (X>>4)==14 followed by +** 0x00 0x00 0x00 0x0a, or (5) (X>>4)==15 followed by 7 bytes of 0x00 and +** a single byte of 0x0a. The shorter forms are preferred, of course, but +** sometimes when generating JSONB, the payload size is not known in advance +** and it is convenient to reserve sufficient header space to cover the +** largest possible payload size and then come back later and patch up +** the size when it becomes known, resulting in a non-minimal encoding. +** +** The value (X>>4)==15 is not actually used in the current implementation +** (as SQLite is currently unable handle BLOBs larger than about 2GB) +** but is included in the design to allow for future enhancements. +** +** The payload follows the header. NULL, TRUE, and FALSE have no payload and +** their payload size must always be zero. The payload for INT, INT5, +** FLOAT, FLOAT5, TEXT, TEXTJ, TEXT5, and TEXTROW is text. Note that the +** "..." or '...' delimiters are omitted from the various text encodings. +** The payload for ARRAY and OBJECT is a list of additional elements that +** are the content for the array or object. The payload for an OBJECT +** must be an even number of elements. The first element of each pair is +** the label and must be of type TEXT, TEXTJ, TEXT5, or TEXTRAW. +** +** A valid JSONB blob consists of a single element, as described above. +** Usually this will be an ARRAY or OBJECT element which has many more +** elements as its content. But the overall blob is just a single element. +** +** Input validation for JSONB blobs simply checks that the element type +** code is between 0 and 12 and that the total size of the element +** (header plus payload) is the same as the size of the BLOB. If those +** checks are true, the BLOB is assumed to be JSONB and processing continues. +** Errors are only raised if some other miscoding is discovered during +** processing. +** +** Additional information can be found in the doc/jsonb.md file of the +** canonical SQLite source tree. 
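
The header layout spelled out above decodes in a handful of lines. This sketch mirrors the specification rather than the actual jsonbPayloadSize() implementation:

    #include <stdint.h>
    #include <stddef.h>

    /* Decode one JSONB element header. On success, fill in the element
    ** type (0..12) and payload size and return the header length in
    ** bytes; return 0 on a short buffer or a reserved type code. */
    static size_t jsonb_header(const uint8_t *a, size_t n,
                               int *pType, uint64_t *pPayload){
      size_t i, nSz;
      if( n<1 ) return 0;
      *pType = a[0] & 0x0f;
      if( *pType>12 ) return 0;          /* 13..15 are reserved */
      if( (a[0]>>4)<=11 ){               /* size lives in the upper nibble */
        *pPayload = a[0]>>4;
        return 1;
      }
      nSz = (size_t)1 << ((a[0]>>4)-12); /* 1, 2, 4 or 8 size bytes */
      if( n<1+nSz ) return 0;
      *pPayload = 0;
      for(i=0; i<nSz; i++) *pPayload = (*pPayload<<8) | a[1+i];
      return 1+nSz;
    }
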
*/ #ifndef SQLITE_OMIT_JSON /* #include "sqliteInt.h" */ +/* JSONB element types +*/ +#define JSONB_NULL 0 /* "null" */ +#define JSONB_TRUE 1 /* "true" */ +#define JSONB_FALSE 2 /* "false" */ +#define JSONB_INT 3 /* integer acceptable to JSON and SQL */ +#define JSONB_INT5 4 /* integer in 0x000 notation */ +#define JSONB_FLOAT 5 /* float acceptable to JSON and SQL */ +#define JSONB_FLOAT5 6 /* float with JSON5 extensions */ +#define JSONB_TEXT 7 /* Text compatible with both JSON and SQL */ +#define JSONB_TEXTJ 8 /* Text with JSON escapes */ +#define JSONB_TEXT5 9 /* Text with JSON-5 escape */ +#define JSONB_TEXTRAW 10 /* SQL text that needs escaping for JSON */ +#define JSONB_ARRAY 11 /* An array */ +#define JSONB_OBJECT 12 /* An object */ + +/* Human-readable names for the JSONB values. The index for each +** string must correspond to the JSONB_* integer above. +*/ +static const char * const jsonbType[] = { + "null", "true", "false", "integer", "integer", + "real", "real", "text", "text", "text", + "text", "array", "object", "", "", "", "" +}; + /* ** Growing our own isspace() routine this way is twice as fast as ** the library isspace() function, resulting in a 7% overall performance -** increase for the parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os). +** increase for the text-JSON parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os). */ static const char jsonIsSpace[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, @@ -202612,11 +203348,19 @@ static const char jsonIsSpace[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; -#define fast_isspace(x) (jsonIsSpace[(unsigned char)x]) +#define jsonIsspace(x) (jsonIsSpace[(unsigned char)x]) /* -** Characters that are special to JSON. Control charaters, -** '"' and '\\'. +** The set of all space characters recognized by jsonIsspace(). +** Useful as the second argument to strspn(). +*/ +static const char jsonSpaces[] = "\011\012\015\040"; + +/* +** Characters that are special to JSON. Control characters, +** '"' and '\\' and '\''. Actually, '\'' is not special to +** canonical JSON, but it is special in JSON-5, so we include +** it in the set of special characters. */ static const char jsonIsOk[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -202638,22 +203382,49 @@ static const char jsonIsOk[256] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; - -#if !defined(SQLITE_DEBUG) && !defined(SQLITE_COVERAGE_TEST) -# define VVA(X) -#else -# define VVA(X) X -#endif - /* Objects */ +typedef struct JsonCache JsonCache; typedef struct JsonString JsonString; -typedef struct JsonNode JsonNode; typedef struct JsonParse JsonParse; -typedef struct JsonCleanup JsonCleanup; + +/* +** Magic number used for the JSON parse cache in sqlite3_get_auxdata() +*/ +#define JSON_CACHE_ID (-429938) /* Cache entry */ +#define JSON_CACHE_SIZE 4 /* Max number of cache entries */ + +/* +** jsonUnescapeOneChar() returns this invalid code point if it encounters +** a syntax error. +*/ +#define JSON_INVALID_CHAR 0x99999 + +/* A cache mapping JSON text into JSONB blobs. +** +** Each cache entry is a JsonParse object with the following restrictions: +** +** * The bReadOnly flag must be set +** +** * The aBlob[] array must be owned by the JsonParse object. In other +** words, nBlobAlloc must be non-zero. +** +** * eEdit and delta must be zero. +** +** * zJson must be an RCStr. In other words bJsonIsRCStr must be true. 
+*/ +struct JsonCache { + sqlite3 *db; /* Database connection */ + int nUsed; /* Number of active entries in the cache */ + JsonParse *a[JSON_CACHE_SIZE]; /* One line for each cache entry */ +}; /* An instance of this object represents a JSON string ** under construction. Really, this is a generic string accumulator ** that can be and is used to create strings other than JSON. +** +** If the generated string is longer than will fit into the zSpace[] buffer, +** then it will be an RCStr string. This aids with caching of large +** JSON strings. */ struct JsonString { sqlite3_context *pCtx; /* Function context - put error messages here */ @@ -202661,121 +203432,75 @@ struct JsonString { u64 nAlloc; /* Bytes of storage available in zBuf[] */ u64 nUsed; /* Bytes of zBuf[] currently used */ u8 bStatic; /* True if zBuf is static space */ - u8 bErr; /* True if an error has been encountered */ + u8 eErr; /* True if an error has been encountered */ char zSpace[100]; /* Initial static space */ }; -/* A deferred cleanup task. A list of JsonCleanup objects might be -** run when the JsonParse object is destroyed. -*/ -struct JsonCleanup { - JsonCleanup *pJCNext; /* Next in a list */ - void (*xOp)(void*); /* Routine to run */ - void *pArg; /* Argument to xOp() */ -}; +/* Allowed values for JsonString.eErr */ +#define JSTRING_OOM 0x01 /* Out of memory */ +#define JSTRING_MALFORMED 0x02 /* Malformed JSONB */ +#define JSTRING_ERR 0x04 /* Error already sent to sqlite3_result */ -/* JSON type values +/* The "subtype" set for text JSON values passed through using +** sqlite3_result_subtype() and sqlite3_value_subtype(). */ -#define JSON_SUBST 0 /* Special edit node. Uses u.iPrev */ -#define JSON_NULL 1 -#define JSON_TRUE 2 -#define JSON_FALSE 3 -#define JSON_INT 4 -#define JSON_REAL 5 -#define JSON_STRING 6 -#define JSON_ARRAY 7 -#define JSON_OBJECT 8 - -/* The "subtype" set for JSON values */ #define JSON_SUBTYPE 74 /* Ascii for "J" */ /* -** Names of the various JSON types: +** Bit values for the flags passed into various SQL function implementations +** via the sqlite3_user_data() value. */ -static const char * const jsonType[] = { - "subst", - "null", "true", "false", "integer", "real", "text", "array", "object" -}; - -/* Bit values for the JsonNode.jnFlag field -*/ -#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */ -#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */ -#define JNODE_REMOVE 0x04 /* Do not output */ -#define JNODE_REPLACE 0x08 /* Target of a JSON_SUBST node */ -#define JNODE_APPEND 0x10 /* More ARRAY/OBJECT entries at u.iAppend */ -#define JNODE_LABEL 0x20 /* Is a label of an object */ -#define JNODE_JSON5 0x40 /* Node contains JSON5 enhancements */ - - -/* A single node of parsed JSON. An array of these nodes describes -** a parse of JSON + edits. -** -** Use the json_parse() SQL function (available when compiled with -** -DSQLITE_DEBUG) to see a dump of complete JsonParse objects, including -** a complete listing and decoding of the array of JsonNodes. 
-*/ -struct JsonNode { - u8 eType; /* One of the JSON_ type values */ - u8 jnFlags; /* JNODE flags */ - u8 eU; /* Which union element to use */ - u32 n; /* Bytes of content for INT, REAL or STRING - ** Number of sub-nodes for ARRAY and OBJECT - ** Node that SUBST applies to */ - union { - const char *zJContent; /* 1: Content for INT, REAL, and STRING */ - u32 iAppend; /* 2: More terms for ARRAY and OBJECT */ - u32 iKey; /* 3: Key for ARRAY objects in json_tree() */ - u32 iPrev; /* 4: Previous SUBST node, or 0 */ - } u; -}; +#define JSON_JSON 0x01 /* Result is always JSON */ +#define JSON_SQL 0x02 /* Result is always SQL */ +#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */ +#define JSON_ISSET 0x04 /* json_set(), not json_insert() */ +#define JSON_BLOB 0x08 /* Use the BLOB output format */ -/* A parsed and possibly edited JSON string. Lifecycle: -** -** 1. JSON comes in and is parsed into an array aNode[]. The original -** JSON text is stored in zJson. +/* A parsed JSON value. Lifecycle: ** -** 2. Zero or more changes are made (via json_remove() or json_replace() -** or similar) to the aNode[] array. +** 1. JSON comes in and is parsed into a JSONB value in aBlob. The +** original text is stored in zJson. This step is skipped if the +** input is JSONB instead of text JSON. ** -** 3. A new, edited and mimified JSON string is generated from aNode -** and stored in zAlt. The JsonParse object always owns zAlt. +** 2. The aBlob[] array is searched using the JSON path notation, if needed. ** -** Step 1 always happens. Step 2 and 3 may or may not happen, depending -** on the operation. +** 3. Zero or more changes are made to aBlob[] (via json_remove() or +** json_replace() or json_patch() or similar). ** -** aNode[].u.zJContent entries typically point into zJson. Hence zJson -** must remain valid for the lifespan of the parse. For edits, -** aNode[].u.zJContent might point to malloced space other than zJson. -** Entries in pClup are responsible for freeing that extra malloced space. -** -** When walking the parse tree in aNode[], edits are ignored if useMod is -** false. +** 4. New JSON text is generated from the aBlob[] for output. This step +** is skipped if the function is one of the jsonb_* functions that +** returns JSONB instead of text JSON. */ struct JsonParse { - u32 nNode; /* Number of slots of aNode[] used */ - u32 nAlloc; /* Number of slots of aNode[] allocated */ - JsonNode *aNode; /* Array of nodes containing the parse */ - char *zJson; /* Original JSON string (before edits) */ - char *zAlt; /* Revised and/or mimified JSON */ - u32 *aUp; /* Index of parent of each node */ - JsonCleanup *pClup;/* Cleanup operations prior to freeing this object */ + u8 *aBlob; /* JSONB representation of JSON value */ + u32 nBlob; /* Bytes of aBlob[] actually used */ + u32 nBlobAlloc; /* Bytes allocated to aBlob[]. 
0 if aBlob is external */
+  char *zJson;       /* Json text used for parsing */
+  sqlite3 *db;       /* The database connection to which this object belongs */
+  int nJson;         /* Length of the zJson string in bytes */
+  u32 nJPRef;        /* Number of references to this object */
+  u32 iErr;          /* Error location in zJson[] */
   u16 iDepth;        /* Nesting depth */
   u8 nErr;           /* Number of errors seen */
   u8 oom;            /* Set to true if out of memory */
   u8 bJsonIsRCStr;   /* True if zJson is an RCStr */
   u8 hasNonstd;      /* True if input uses non-standard features like JSON5 */
-  u8 useMod;         /* Actually use the edits contain inside aNode */
-  u8 hasMod;         /* aNode contains edits from the original zJson */
-  u32 nJPRef;        /* Number of references to this object */
-  int nJson;         /* Length of the zJson string in bytes */
-  int nAlt;          /* Length of alternative JSON string zAlt, in bytes */
-  u32 iErr;          /* Error location in zJson[] */
-  u32 iSubst;        /* Last JSON_SUBST entry in aNode[] */
-  u32 iHold;         /* Age of this entry in the cache for LRU replacement */
+  u8 bReadOnly;      /* Do not modify. */
+  /* Search and edit information.  See jsonLookupStep() */
+  u8 eEdit;          /* Edit operation to apply */
+  int delta;         /* Size change due to the edit */
+  u32 nIns;          /* Number of bytes to insert */
+  u32 iLabel;        /* Location of label if search landed on an object value */
+  u8 *aIns;          /* Content to be inserted */
 };
 
+/* Allowed values for JsonParse.eEdit */
+#define JEDIT_DEL   1   /* Delete if exists */
+#define JEDIT_REPL  2   /* Overwrite if exists */
+#define JEDIT_INS   3   /* Insert if not exists */
+#define JEDIT_SET   4   /* Insert or overwrite */
+
 /*
 ** Maximum nesting depth of JSON for this implementation.
 **
@@ -202783,15 +203508,151 @@ struct JsonParse {
 ** descent parser.  A depth of 1000 is far deeper than any sane JSON
 ** should go.  Historical note: This limit was 2000 prior to version 3.42.0
 */
-#define JSON_MAX_DEPTH 1000
+#ifndef SQLITE_JSON_MAX_DEPTH
+# define JSON_MAX_DEPTH 1000
+#else
+# define JSON_MAX_DEPTH SQLITE_JSON_MAX_DEPTH
+#endif
+
+/*
+** Allowed values for the flgs argument to jsonParseFuncArg()
+*/
+#define JSON_EDITABLE  0x01   /* Generate a writable JsonParse object */
+#define JSON_KEEPERROR 0x02   /* Return non-NULL even if there is an error */
+
+/**************************************************************************
+** Forward references
+**************************************************************************/
+static void jsonReturnStringAsBlob(JsonString*);
+static int jsonFuncArgMightBeBinary(sqlite3_value *pJson);
+static u32 jsonTranslateBlobToText(const JsonParse*,u32,JsonString*);
+static void jsonReturnParse(sqlite3_context*,JsonParse*);
+static JsonParse *jsonParseFuncArg(sqlite3_context*,sqlite3_value*,u32);
+static void jsonParseFree(JsonParse*);
+static u32 jsonbPayloadSize(const JsonParse*, u32, u32*);
+static u32 jsonUnescapeOneChar(const char*, u32, u32*);
+
+/**************************************************************************
+** Utility routines for dealing with JsonCache objects
+**************************************************************************/
+
+/*
+** Free a JsonCache object.
+*/
+static void jsonCacheDelete(JsonCache *p){
+  int i;
+  for(i=0; i<p->nUsed; i++){
+    jsonParseFree(p->a[i]);
+  }
+  sqlite3DbFree(p->db, p);
+}
+static void jsonCacheDeleteGeneric(void *p){
+  jsonCacheDelete((JsonCache*)p);
+}
+
+/*
+** Insert a new entry into the cache.  If the cache is full, expel
+** the least recently used entry.  Return SQLITE_OK on success or a
+** result code otherwise.
+**
+** Cache entries are stored in age order, oldest first.
+*/
+static int jsonCacheInsert(
+  sqlite3_context *ctx,   /* The SQL statement context holding the cache */
+  JsonParse *pParse       /* The parse object to be added to the cache */
+){
+  JsonCache *p;
+
+  assert( pParse->zJson!=0 );
+  assert( pParse->bJsonIsRCStr );
+  assert( pParse->delta==0 );
+  p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+  if( p==0 ){
+    sqlite3 *db = sqlite3_context_db_handle(ctx);
+    p = sqlite3DbMallocZero(db, sizeof(*p));
+    if( p==0 ) return SQLITE_NOMEM;
+    p->db = db;
+    sqlite3_set_auxdata(ctx, JSON_CACHE_ID, p, jsonCacheDeleteGeneric);
+    p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+    if( p==0 ) return SQLITE_NOMEM;
+  }
+  if( p->nUsed >= JSON_CACHE_SIZE ){
+    jsonParseFree(p->a[0]);
+    memmove(p->a, &p->a[1], (JSON_CACHE_SIZE-1)*sizeof(p->a[0]));
+    p->nUsed = JSON_CACHE_SIZE-1;
+  }
+  assert( pParse->nBlobAlloc>0 );
+  pParse->eEdit = 0;
+  pParse->nJPRef++;
+  pParse->bReadOnly = 1;
+  p->a[p->nUsed] = pParse;
+  p->nUsed++;
+  return SQLITE_OK;
+}
+
+/*
+** Search for a cached translation of the JSON text supplied by pArg.
+** Return the JsonParse object if found.  Return NULL if not found.
+**
+** When a match is found, the matching entry is moved to become the
+** most-recently used entry if it isn't so already.
+**
+** The JsonParse object returned still belongs to the Cache and might
+** be deleted at any moment.  If the caller wants the JsonParse to
+** linger, it needs to increment the nJPRef reference counter.
+*/
+static JsonParse *jsonCacheSearch(
+  sqlite3_context *ctx,    /* The SQL statement context holding the cache */
+  sqlite3_value *pArg      /* Function argument containing SQL text */
+){
+  JsonCache *p;
+  int i;
+  const char *zJson;
+  int nJson;
+
+  if( sqlite3_value_type(pArg)!=SQLITE_TEXT ){
+    return 0;
+  }
+  zJson = (const char*)sqlite3_value_text(pArg);
+  if( zJson==0 ) return 0;
+  nJson = sqlite3_value_bytes(pArg);
+
+  p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID);
+  if( p==0 ){
+    return 0;
+  }
+  for(i=0; i<p->nUsed; i++){
+    if( p->a[i]->zJson==zJson ) break;
+  }
+  if( i>=p->nUsed ){
+    for(i=0; i<p->nUsed; i++){
+      if( p->a[i]->nJson!=nJson ) continue;
+      if( memcmp(p->a[i]->zJson, zJson, nJson)==0 ) break;
+    }
+  }
+  if( i<p->nUsed ){
+    if( i<p->nUsed-1 ){
+      /* Make the matching entry the most recently used entry */
+      JsonParse *tmp = p->a[i];
+      memmove(&p->a[i], &p->a[i+1], (p->nUsed-i-1)*sizeof(tmp));
+      p->a[p->nUsed-1] = tmp;
+      i = p->nUsed - 1;
+    }
+    assert( p->a[i]->delta==0 );
+    return p->a[i];
+  }else{
+    return 0;
+  }
+}
 
 /**************************************************************************
 ** Utility routines for dealing with JsonString objects
 **************************************************************************/
 
-/* Set the JsonString object to an empty string
+/* Turn uninitialized bulk memory into a valid JsonString object
+** holding a zero-length string.
 */
-static void jsonZero(JsonString *p){
+static void jsonStringZero(JsonString *p){
   p->zBuf = p->zSpace;
   p->nAlloc = sizeof(p->zSpace);
   p->nUsed = 0;
@@ -202800,39 +203661,39 @@ static void jsonZero(JsonString *p){
 
 /* Initialize the JsonString object
 */
-static void jsonInit(JsonString *p, sqlite3_context *pCtx){
+static void jsonStringInit(JsonString *p, sqlite3_context *pCtx){
   p->pCtx = pCtx;
-  p->bErr = 0;
-  jsonZero(p);
+  p->eErr = 0;
+  jsonStringZero(p);
 }
 
 /* Free all allocated memory and reset the JsonString object back to its
 ** initial state.
 */
-static void jsonReset(JsonString *p){
+static void jsonStringReset(JsonString *p){
   if( !p->bStatic ) sqlite3RCStrUnref(p->zBuf);
-  jsonZero(p);
+  jsonStringZero(p);
 }
 
 /* Report an out-of-memory (OOM) condition
 */
-static void jsonOom(JsonString *p){
-  p->bErr = 1;
-  sqlite3_result_error_nomem(p->pCtx);
-  jsonReset(p);
+static void jsonStringOom(JsonString *p){
+  p->eErr |= JSTRING_OOM;
+  if( p->pCtx ) sqlite3_result_error_nomem(p->pCtx);
+  jsonStringReset(p);
}
 
 /* Enlarge pJson->zBuf so that it can hold at least N more bytes.
 ** Return zero on success.  Return non-zero on an OOM error
 */
-static int jsonGrow(JsonString *p, u32 N){
+static int jsonStringGrow(JsonString *p, u32 N){
   u64 nTotal = N<p->nAlloc ? p->nAlloc*2 : p->nAlloc+N+10;
   char *zNew;
   if( p->bStatic ){
-    if( p->bErr ) return 1;
+    if( p->eErr ) return 1;
     zNew = sqlite3RCStrNew(nTotal);
     if( zNew==0 ){
-      jsonOom(p);
+      jsonStringOom(p);
       return SQLITE_NOMEM;
     }
     memcpy(zNew, p->zBuf, (size_t)p->nUsed);
@@ -202841,8 +203702,8 @@ static int jsonGrow(JsonString *p, u32 N){
   }else{
     p->zBuf = sqlite3RCStrResize(p->zBuf, nTotal);
     if( p->zBuf==0 ){
-      p->bErr = 1;
-      jsonZero(p);
+      p->eErr |= JSTRING_OOM;
+      jsonStringZero(p);
       return SQLITE_NOMEM;
     }
   }
@@ -202852,20 +203713,20 @@
 
 /* Append N bytes from zIn onto the end of the JsonString string.
 */
-static SQLITE_NOINLINE void jsonAppendExpand(
+static SQLITE_NOINLINE void jsonStringExpandAndAppend(
   JsonString *p,
   const char *zIn,
   u32 N
 ){
   assert( N>0 );
-  if( jsonGrow(p,N) ) return;
+  if( jsonStringGrow(p,N) ) return;
   memcpy(p->zBuf+p->nUsed, zIn, N);
   p->nUsed += N;
 }
 static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
   if( N==0 ) return;
   if( N+p->nUsed >= p->nAlloc ){
-    jsonAppendExpand(p,zIn,N);
+    jsonStringExpandAndAppend(p,zIn,N);
   }else{
     memcpy(p->zBuf+p->nUsed, zIn, N);
     p->nUsed += N;
@@ -202874,7 +203735,7 @@ static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
 static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){
   assert( N>0 );
   if( N+p->nUsed >= p->nAlloc ){
-    jsonAppendExpand(p,zIn,N);
+    jsonStringExpandAndAppend(p,zIn,N);
   }else{
     memcpy(p->zBuf+p->nUsed, zIn, N);
     p->nUsed += N;
@@ -202886,7 +203747,7 @@ static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){
 */
 static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
   va_list ap;
-  if( (p->nUsed + N >= p->nAlloc) && jsonGrow(p, N) ) return;
+  if( (p->nUsed + N >= p->nAlloc) && jsonStringGrow(p, N) ) return;
   va_start(ap, zFormat);
   sqlite3_vsnprintf(N, p->zBuf+p->nUsed, zFormat, ap);
   va_end(ap);
@@ -202896,7 +203757,7 @@ static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
 
 /* Append a single character
 */
 static SQLITE_NOINLINE void jsonAppendCharExpand(JsonString *p, char c){
-  if( jsonGrow(p,1) ) return;
+  if( jsonStringGrow(p,1) ) return;
   p->zBuf[p->nUsed++] = c;
 }
 static void jsonAppendChar(JsonString *p, char c){
@@ -202907,24 +203768,27 @@ static void jsonAppendChar(JsonString *p, char c){
   }
 }
 
-/* Try to force the string to be a zero-terminated RCStr string.
+/* Remove a single character from the end of the string
+*/
+static void jsonStringTrimOneChar(JsonString *p){
+  if( p->eErr==0 ){
+    assert( p->nUsed>0 );
+    p->nUsed--;
+  }
+}
+
+
+/* Make sure there is a zero terminator on p->zBuf[]
 **
 ** Return true on success.  Return false if an OOM prevents this
 ** from happening.
 */
-static int jsonForceRCStr(JsonString *p){
+static int jsonStringTerminate(JsonString *p){
   jsonAppendChar(p, 0);
-  if( p->bErr ) return 0;
-  p->nUsed--;
-  if( p->bStatic==0 ) return 1;
-  p->nAlloc = 0;
-  p->nUsed++;
-  jsonGrow(p, p->nUsed);
-  p->nUsed--;
-  return p->bStatic==0;
+  jsonStringTrimOneChar(p);
+  return p->eErr==0;
 }
-
 /* Append a comma separator to the output buffer, if the previous
 ** character is not '[' or '{'.
 */
@@ -202937,21 +203801,66 @@ static void jsonAppendSeparator(JsonString *p){
 }
 
 /* Append the N-byte string in zIn to the end of the JsonString string
-** under construction.  Enclose the string in "..." and escape
-** any double-quotes or backslash characters contained within the
+** under construction.  Enclose the string in double-quotes ("...") and
+** escape any double-quotes or backslash characters contained within the
 ** string.
+**
+** This routine is a high-runner.  There is a measurable performance
+** increase associated with unwinding the jsonIsOk[] loop.
 */
 static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
-  u32 i;
-  if( zIn==0 || ((N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0) ) return;
+  u32 k;
+  u8 c;
+  const u8 *z = (const u8*)zIn;
+  if( z==0 ) return;
+  if( (N+p->nUsed+2 >= p->nAlloc) && jsonStringGrow(p,N+2)!=0 ) return;
   p->zBuf[p->nUsed++] = '"';
-  for(i=0; i<N; i++){
-    unsigned char c = ((unsigned const char*)zIn)[i];
-    if( jsonIsOk[c] ){
-      p->zBuf[p->nUsed++] = c;
-    }else if( c=='"' || c=='\\' ){
+  while( 1 /*exit-by-break*/ ){
+    k = 0;
+    /* The following while() is the 4-way unwound equivalent of
+    **
+    **     while( k<N && jsonIsOk[z[k]] ){ k++; }
+    */
+    while( 1 /* Corresponds to the "while( k<N )" loop */ ){
+      if( k+3>=N ){
+        while( k<N && jsonIsOk[z[k]] ){ k++; }
+        break;
+      }
+      if( !jsonIsOk[z[k]] ) break;
+      if( !jsonIsOk[z[k+1]] ){ k += 1; break; }
+      if( !jsonIsOk[z[k+2]] ){ k += 2; break; }
+      if( !jsonIsOk[z[k+3]] ){ k += 3; break; }
+      k += 4;
+    }
+    if( k>=N ){
+      if( k>0 ){
+        memcpy(&p->zBuf[p->nUsed], z, k);
+        p->nUsed += k;
+      }
+      break;
+    }
+    if( k>0 ){
+      memcpy(&p->zBuf[p->nUsed], z, k);
+      p->nUsed += k;
+      z += k;
+      N -= k;
+    }
+    c = z[0];
+    if( c=='"' || c=='\\' ){
       json_simple_escape:
-      if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return;
+      if( (p->nUsed+N+3 > p->nAlloc) && jsonStringGrow(p,N+3)!=0 ) return;
       p->zBuf[p->nUsed++] = '\\';
       p->zBuf[p->nUsed++] = c;
     }else if( c=='\'' ){
@@ -202972,7 +203881,7 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
       c = aSpecial[c];
       goto json_simple_escape;
     }
-    if( (p->nUsed+N+7+i > p->nAlloc) && jsonGrow(p,N+7-i)!=0 ) return;
+    if( (p->nUsed+N+7 > p->nAlloc) && jsonStringGrow(p,N+7)!=0 ) return;
     p->zBuf[p->nUsed++] = '\\';
     p->zBuf[p->nUsed++] = 'u';
     p->zBuf[p->nUsed++] = '0';
@@ -202980,140 +203889,18 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
     p->zBuf[p->nUsed++] = "0123456789abcdef"[c>>4];
     p->zBuf[p->nUsed++] = "0123456789abcdef"[c&0xf];
   }
+    z++;
+    N--;
+  }
   p->zBuf[p->nUsed++] = '"';
   assert( p->nUsed<p->nAlloc );
 }
 
 /*
-** The zIn[0..N] string is a JSON5 string literal.  Append to p a translation
-** of the string literal that standard JSON and that omits all JSON5
-** features.
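
The 4-way unwound loop above only computes the length of the longest prefix of bytes that need no escaping, so the whole run can be copied with a single memcpy(). The scalar equivalent, as a free-standing sketch (isOk[] stands in for jsonIsOk[]):

    #include <stddef.h>

    /* Length of the leading run of bytes that can be emitted verbatim. */
    static size_t safe_prefix(const unsigned char *z, size_t n,
                              const char isOk[256]){
      size_t k = 0;
      while( k<n && isOk[z[k]] ) k++;
      return k;
    }

The unwinding pays off only because this scan dominates jsonAppendString(); the escaping branches below it run comparatively rarely.
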
-*/ -static void jsonAppendNormalizedString(JsonString *p, const char *zIn, u32 N){ - u32 i; - jsonAppendChar(p, '"'); - zIn++; - N -= 2; - while( N>0 ){ - for(i=0; i0 ){ - jsonAppendRawNZ(p, zIn, i); - zIn += i; - N -= i; - if( N==0 ) break; - } - assert( zIn[0]=='\\' ); - switch( (u8)zIn[1] ){ - case '\'': - jsonAppendChar(p, '\''); - break; - case 'v': - jsonAppendRawNZ(p, "\\u0009", 6); - break; - case 'x': - jsonAppendRawNZ(p, "\\u00", 4); - jsonAppendRawNZ(p, &zIn[2], 2); - zIn += 2; - N -= 2; - break; - case '0': - jsonAppendRawNZ(p, "\\u0000", 6); - break; - case '\r': - if( zIn[2]=='\n' ){ - zIn++; - N--; - } - break; - case '\n': - break; - case 0xe2: - assert( N>=4 ); - assert( 0x80==(u8)zIn[2] ); - assert( 0xa8==(u8)zIn[3] || 0xa9==(u8)zIn[3] ); - zIn += 2; - N -= 2; - break; - default: - jsonAppendRawNZ(p, zIn, 2); - break; - } - zIn += 2; - N -= 2; - } - jsonAppendChar(p, '"'); -} - -/* -** The zIn[0..N] string is a JSON5 integer literal. Append to p a translation -** of the string literal that standard JSON and that omits all JSON5 -** features. -*/ -static void jsonAppendNormalizedInt(JsonString *p, const char *zIn, u32 N){ - if( zIn[0]=='+' ){ - zIn++; - N--; - }else if( zIn[0]=='-' ){ - jsonAppendChar(p, '-'); - zIn++; - N--; - } - if( zIn[0]=='0' && (zIn[1]=='x' || zIn[1]=='X') ){ - sqlite3_int64 i = 0; - int rc = sqlite3DecOrHexToI64(zIn, &i); - if( rc<=1 ){ - jsonPrintf(100,p,"%lld",i); - }else{ - assert( rc==2 ); - jsonAppendRawNZ(p, "9.0e999", 7); - } - return; - } - assert( N>0 ); - jsonAppendRawNZ(p, zIn, N); -} - -/* -** The zIn[0..N] string is a JSON5 real literal. Append to p a translation -** of the string literal that standard JSON and that omits all JSON5 -** features. -*/ -static void jsonAppendNormalizedReal(JsonString *p, const char *zIn, u32 N){ - u32 i; - if( zIn[0]=='+' ){ - zIn++; - N--; - }else if( zIn[0]=='-' ){ - jsonAppendChar(p, '-'); - zIn++; - N--; - } - if( zIn[0]=='.' ){ - jsonAppendChar(p, '0'); - } - for(i=0; i0 ){ - jsonAppendRawNZ(p, zIn, N); - } -} - - - -/* -** Append a function parameter value to the JSON string under -** construction. +** Append an sqlite3_value (such as a function parameter) to the JSON +** string under construction in p. */ -static void jsonAppendValue( +static void jsonAppendSqlValue( JsonString *p, /* Append to this JSON string */ sqlite3_value *pValue /* Value to append */ ){ @@ -203143,290 +203930,127 @@ static void jsonAppendValue( break; } default: { - if( p->bErr==0 ){ + if( jsonFuncArgMightBeBinary(pValue) ){ + JsonParse px; + memset(&px, 0, sizeof(px)); + px.aBlob = (u8*)sqlite3_value_blob(pValue); + px.nBlob = sqlite3_value_bytes(pValue); + jsonTranslateBlobToText(&px, 0, p); + }else if( p->eErr==0 ){ sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1); - p->bErr = 2; - jsonReset(p); + p->eErr = JSTRING_ERR; + jsonStringReset(p); } break; } } } - -/* Make the JSON in p the result of the SQL function. +/* Make the text in p (which is probably a generated JSON text string) +** the result of the SQL function. +** +** The JsonString is reset. ** -** The JSON string is reset. +** If pParse and ctx are both non-NULL, then the SQL string in p is +** loaded into the zJson field of the pParse object as a RCStr and the +** pParse is added to the cache. 
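
With the JsonNode tree gone, the jsonAppendNormalized*() routines above are no longer needed: JSON5 constructs are now normalized when a JSONB blob is rendered back to text. The visible behavior should be unchanged; json() still canonicalizes JSON5 input, as in this sketch against the public API (error handling elided):

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt;
      sqlite3_open(":memory:", &db);
      /* JSON5 input: unquoted key, hex integer, bare leading ".".  All of
      ** these are rewritten into canonical JSON on output. */
      sqlite3_prepare_v2(db, "SELECT json(' {a: 0x10, b: .5} ')", -1, &pStmt, 0);
      if( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
        /* expected: {"a":16,"b":0.5} */
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }
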
*/ -static void jsonResult(JsonString *p){ - if( p->bErr==0 ){ - if( p->bStatic ){ +static void jsonReturnString( + JsonString *p, /* String to return */ + JsonParse *pParse, /* JSONB source or NULL */ + sqlite3_context *ctx /* Where to cache */ +){ + assert( (pParse!=0)==(ctx!=0) ); + assert( ctx==0 || ctx==p->pCtx ); + if( p->eErr==0 ){ + int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(p->pCtx)); + if( flags & JSON_BLOB ){ + jsonReturnStringAsBlob(p); + }else if( p->bStatic ){ sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed, SQLITE_TRANSIENT, SQLITE_UTF8); - }else if( jsonForceRCStr(p) ){ - sqlite3RCStrRef(p->zBuf); - sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed, + }else if( jsonStringTerminate(p) ){ + if( pParse && pParse->bJsonIsRCStr==0 && pParse->nBlobAlloc>0 ){ + int rc; + pParse->zJson = sqlite3RCStrRef(p->zBuf); + pParse->nJson = p->nUsed; + pParse->bJsonIsRCStr = 1; + rc = jsonCacheInsert(ctx, pParse); + if( rc==SQLITE_NOMEM ){ + sqlite3_result_error_nomem(ctx); + jsonStringReset(p); + return; + } + } + sqlite3_result_text64(p->pCtx, sqlite3RCStrRef(p->zBuf), p->nUsed, sqlite3RCStrUnref, SQLITE_UTF8); + }else{ + sqlite3_result_error_nomem(p->pCtx); } - } - if( p->bErr==1 ){ + }else if( p->eErr & JSTRING_OOM ){ sqlite3_result_error_nomem(p->pCtx); + }else if( p->eErr & JSTRING_MALFORMED ){ + sqlite3_result_error(p->pCtx, "malformed JSON", -1); } - jsonReset(p); + jsonStringReset(p); } /************************************************************************** -** Utility routines for dealing with JsonNode and JsonParse objects +** Utility routines for dealing with JsonParse objects **************************************************************************/ -/* -** Return the number of consecutive JsonNode slots need to represent -** the parsed JSON at pNode. The minimum answer is 1. For ARRAY and -** OBJECT types, the number might be larger. -** -** Appended elements are not counted. The value returned is the number -** by which the JsonNode counter should increment in order to go to the -** next peer value. -*/ -static u32 jsonNodeSize(JsonNode *pNode){ - return pNode->eType>=JSON_ARRAY ? pNode->n+1 : 1; -} - /* ** Reclaim all memory allocated by a JsonParse object. But do not ** delete the JsonParse object itself. */ static void jsonParseReset(JsonParse *pParse){ - while( pParse->pClup ){ - JsonCleanup *pTask = pParse->pClup; - pParse->pClup = pTask->pJCNext; - pTask->xOp(pTask->pArg); - sqlite3_free(pTask); - } assert( pParse->nJPRef<=1 ); - if( pParse->aNode ){ - sqlite3_free(pParse->aNode); - pParse->aNode = 0; - } - pParse->nNode = 0; - pParse->nAlloc = 0; - if( pParse->aUp ){ - sqlite3_free(pParse->aUp); - pParse->aUp = 0; - } if( pParse->bJsonIsRCStr ){ sqlite3RCStrUnref(pParse->zJson); pParse->zJson = 0; + pParse->nJson = 0; pParse->bJsonIsRCStr = 0; } - if( pParse->zAlt ){ - sqlite3RCStrUnref(pParse->zAlt); - pParse->zAlt = 0; + if( pParse->nBlobAlloc ){ + sqlite3DbFree(pParse->db, pParse->aBlob); + pParse->aBlob = 0; + pParse->nBlob = 0; + pParse->nBlobAlloc = 0; } } /* -** Free a JsonParse object that was obtained from sqlite3_malloc(). -** -** Note that destroying JsonParse might call sqlite3RCStrUnref() to -** destroy the zJson value. The RCStr object might recursively invoke -** JsonParse to destroy this pParse object again. Take care to ensure -** that this recursive destructor sequence terminates harmlessly. +** Decrement the reference count on the JsonParse object. When the +** count reaches zero, free the object. 
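
The JSON_CACHE_ID cache rides on SQLite's auxiliary-data mechanism, which ties data to a compiled statement and frees it automatically. A minimal sketch of that general pattern (MY_CACHE_ID and my_func are hypothetical; the JSON code uses JSON_CACHE_ID the same way):

    #include <stdlib.h>
    #include <sqlite3.h>

    #define MY_CACHE_ID (-1234)  /* negative key avoids per-argument slots */

    static void my_func(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      void *pCache = sqlite3_get_auxdata(ctx, MY_CACHE_ID);
      if( pCache==0 ){
        pCache = malloc(64);               /* stand-in for a parse cache */
        if( pCache==0 ){ sqlite3_result_error_nomem(ctx); return; }
        sqlite3_set_auxdata(ctx, MY_CACHE_ID, pCache, free);
        /* Re-fetch, as jsonCacheInsert() does: sqlite3_set_auxdata() is
        ** allowed to invoke the destructor before it returns. */
        pCache = sqlite3_get_auxdata(ctx, MY_CACHE_ID);
        if( pCache==0 ){ sqlite3_result_error_nomem(ctx); return; }
      }
      (void)argc; (void)argv;
      sqlite3_result_int(ctx, 1);
    }
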
*/ static void jsonParseFree(JsonParse *pParse){ - if( pParse->nJPRef>1 ){ - pParse->nJPRef--; - }else{ - jsonParseReset(pParse); - sqlite3_free(pParse); - } -} - -/* -** Add a cleanup task to the JsonParse object. -** -** If an OOM occurs, the cleanup operation happens immediately -** and this function returns SQLITE_NOMEM. -*/ -static int jsonParseAddCleanup( - JsonParse *pParse, /* Add the cleanup task to this parser */ - void(*xOp)(void*), /* The cleanup task */ - void *pArg /* Argument to the cleanup */ -){ - JsonCleanup *pTask = sqlite3_malloc64( sizeof(*pTask) ); - if( pTask==0 ){ - pParse->oom = 1; - xOp(pArg); - return SQLITE_ERROR; - } - pTask->pJCNext = pParse->pClup; - pParse->pClup = pTask; - pTask->xOp = xOp; - pTask->pArg = pArg; - return SQLITE_OK; -} - -/* -** Convert the JsonNode pNode into a pure JSON string and -** append to pOut. Subsubstructure is also included. Return -** the number of JsonNode objects that are encoded. -*/ -static void jsonRenderNode( - JsonParse *pParse, /* the complete parse of the JSON */ - JsonNode *pNode, /* The node to render */ - JsonString *pOut /* Write JSON here */ -){ - assert( pNode!=0 ); - while( (pNode->jnFlags & JNODE_REPLACE)!=0 && pParse->useMod ){ - u32 idx = (u32)(pNode - pParse->aNode); - u32 i = pParse->iSubst; - while( 1 /*exit-by-break*/ ){ - assert( inNode ); - assert( pParse->aNode[i].eType==JSON_SUBST ); - assert( pParse->aNode[i].eU==4 ); - assert( pParse->aNode[i].u.iPrevaNode[i].n==idx ){ - pNode = &pParse->aNode[i+1]; - break; - } - i = pParse->aNode[i].u.iPrev; - } - } - switch( pNode->eType ){ - default: { - assert( pNode->eType==JSON_NULL ); - jsonAppendRawNZ(pOut, "null", 4); - break; - } - case JSON_TRUE: { - jsonAppendRawNZ(pOut, "true", 4); - break; - } - case JSON_FALSE: { - jsonAppendRawNZ(pOut, "false", 5); - break; - } - case JSON_STRING: { - assert( pNode->eU==1 ); - if( pNode->jnFlags & JNODE_RAW ){ - if( pNode->jnFlags & JNODE_LABEL ){ - jsonAppendChar(pOut, '"'); - jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n); - jsonAppendChar(pOut, '"'); - }else{ - jsonAppendString(pOut, pNode->u.zJContent, pNode->n); - } - }else if( pNode->jnFlags & JNODE_JSON5 ){ - jsonAppendNormalizedString(pOut, pNode->u.zJContent, pNode->n); - }else{ - assert( pNode->n>0 ); - jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n); - } - break; - } - case JSON_REAL: { - assert( pNode->eU==1 ); - if( pNode->jnFlags & JNODE_JSON5 ){ - jsonAppendNormalizedReal(pOut, pNode->u.zJContent, pNode->n); - }else{ - assert( pNode->n>0 ); - jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n); - } - break; - } - case JSON_INT: { - assert( pNode->eU==1 ); - if( pNode->jnFlags & JNODE_JSON5 ){ - jsonAppendNormalizedInt(pOut, pNode->u.zJContent, pNode->n); - }else{ - assert( pNode->n>0 ); - jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n); - } - break; - } - case JSON_ARRAY: { - u32 j = 1; - jsonAppendChar(pOut, '['); - for(;;){ - while( j<=pNode->n ){ - if( (pNode[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ){ - jsonAppendSeparator(pOut); - jsonRenderNode(pParse, &pNode[j], pOut); - } - j += jsonNodeSize(&pNode[j]); - } - if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; - if( pParse->useMod==0 ) break; - assert( pNode->eU==2 ); - pNode = &pParse->aNode[pNode->u.iAppend]; - j = 1; - } - jsonAppendChar(pOut, ']'); - break; - } - case JSON_OBJECT: { - u32 j = 1; - jsonAppendChar(pOut, '{'); - for(;;){ - while( j<=pNode->n ){ - if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ){ - jsonAppendSeparator(pOut); - 
jsonRenderNode(pParse, &pNode[j], pOut); - jsonAppendChar(pOut, ':'); - jsonRenderNode(pParse, &pNode[j+1], pOut); - } - j += 1 + jsonNodeSize(&pNode[j+1]); - } - if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; - if( pParse->useMod==0 ) break; - assert( pNode->eU==2 ); - pNode = &pParse->aNode[pNode->u.iAppend]; - j = 1; - } - jsonAppendChar(pOut, '}'); - break; + if( pParse ){ + if( pParse->nJPRef>1 ){ + pParse->nJPRef--; + }else{ + jsonParseReset(pParse); + sqlite3DbFree(pParse->db, pParse); } } } -/* -** Return a JsonNode and all its descendants as a JSON string. -*/ -static void jsonReturnJson( - JsonParse *pParse, /* The complete JSON */ - JsonNode *pNode, /* Node to return */ - sqlite3_context *pCtx, /* Return value for this function */ - int bGenerateAlt /* Also store the rendered text in zAlt */ -){ - JsonString s; - if( pParse->oom ){ - sqlite3_result_error_nomem(pCtx); - return; - } - if( pParse->nErr==0 ){ - jsonInit(&s, pCtx); - jsonRenderNode(pParse, pNode, &s); - if( bGenerateAlt && pParse->zAlt==0 && jsonForceRCStr(&s) ){ - pParse->zAlt = sqlite3RCStrRef(s.zBuf); - pParse->nAlt = s.nUsed; - } - jsonResult(&s); - sqlite3_result_subtype(pCtx, JSON_SUBTYPE); - } -} +/************************************************************************** +** Utility routines for the JSON text parser +**************************************************************************/ /* ** Translate a single byte of Hex into an integer. -** This routine only works if h really is a valid hexadecimal -** character: 0..9a..fA..F +** This routine only gives a correct answer if h really is a valid hexadecimal +** character: 0..9a..fA..F. But unlike sqlite3HexToInt(), it does not +** assert() if the digit is not hex. */ static u8 jsonHexToInt(int h){ - assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') ); +#ifdef SQLITE_ASCII + h += 9*(1&(h>>6)); +#endif #ifdef SQLITE_EBCDIC h += 9*(1&~(h>>4)); -#else - h += 9*(1&(h>>6)); #endif return (u8)(h & 0xf); } @@ -203436,10 +204060,6 @@ static u8 jsonHexToInt(int h){ */ static u32 jsonHexToInt4(const char *z){ u32 v; - assert( sqlite3Isxdigit(z[0]) ); - assert( sqlite3Isxdigit(z[1]) ); - assert( sqlite3Isxdigit(z[2]) ); - assert( sqlite3Isxdigit(z[3]) ); v = (jsonHexToInt(z[0])<<12) + (jsonHexToInt(z[1])<<8) + (jsonHexToInt(z[2])<<4) @@ -203447,281 +204067,6 @@ static u32 jsonHexToInt4(const char *z){ return v; } -/* -** Make the JsonNode the return value of the function. -*/ -static void jsonReturn( - JsonParse *pParse, /* Complete JSON parse tree */ - JsonNode *pNode, /* Node to return */ - sqlite3_context *pCtx /* Return value for this function */ -){ - switch( pNode->eType ){ - default: { - assert( pNode->eType==JSON_NULL ); - sqlite3_result_null(pCtx); - break; - } - case JSON_TRUE: { - sqlite3_result_int(pCtx, 1); - break; - } - case JSON_FALSE: { - sqlite3_result_int(pCtx, 0); - break; - } - case JSON_INT: { - sqlite3_int64 i = 0; - int rc; - int bNeg = 0; - const char *z; - - assert( pNode->eU==1 ); - z = pNode->u.zJContent; - if( z[0]=='-' ){ z++; bNeg = 1; } - else if( z[0]=='+' ){ z++; } - rc = sqlite3DecOrHexToI64(z, &i); - if( rc<=1 ){ - sqlite3_result_int64(pCtx, bNeg ? 
-i : i); - }else if( rc==3 && bNeg ){ - sqlite3_result_int64(pCtx, SMALLEST_INT64); - }else{ - goto to_double; - } - break; - } - case JSON_REAL: { - double r; - const char *z; - assert( pNode->eU==1 ); - to_double: - z = pNode->u.zJContent; - sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8); - sqlite3_result_double(pCtx, r); - break; - } - case JSON_STRING: { - if( pNode->jnFlags & JNODE_RAW ){ - assert( pNode->eU==1 ); - sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n, - SQLITE_TRANSIENT); - }else if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){ - /* JSON formatted without any backslash-escapes */ - assert( pNode->eU==1 ); - sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2, - SQLITE_TRANSIENT); - }else{ - /* Translate JSON formatted string into raw text */ - u32 i; - u32 n = pNode->n; - const char *z; - char *zOut; - u32 j; - u32 nOut = n; - assert( pNode->eU==1 ); - z = pNode->u.zJContent; - zOut = sqlite3_malloc( nOut+1 ); - if( zOut==0 ){ - sqlite3_result_error_nomem(pCtx); - break; - } - for(i=1, j=0; i>6)); - zOut[j++] = 0x80 | (v&0x3f); - }else{ - u32 vlo; - if( (v&0xfc00)==0xd800 - && i>18); - zOut[j++] = 0x80 | ((v>>12)&0x3f); - zOut[j++] = 0x80 | ((v>>6)&0x3f); - zOut[j++] = 0x80 | (v&0x3f); - }else{ - zOut[j++] = 0xe0 | (v>>12); - zOut[j++] = 0x80 | ((v>>6)&0x3f); - zOut[j++] = 0x80 | (v&0x3f); - } - } - continue; - }else if( c=='b' ){ - c = '\b'; - }else if( c=='f' ){ - c = '\f'; - }else if( c=='n' ){ - c = '\n'; - }else if( c=='r' ){ - c = '\r'; - }else if( c=='t' ){ - c = '\t'; - }else if( c=='v' ){ - c = '\v'; - }else if( c=='\'' || c=='"' || c=='/' || c=='\\' ){ - /* pass through unchanged */ - }else if( c=='0' ){ - c = 0; - }else if( c=='x' ){ - c = (jsonHexToInt(z[i+1])<<4) | jsonHexToInt(z[i+2]); - i += 2; - }else if( c=='\r' && z[i+1]=='\n' ){ - i++; - continue; - }else if( 0xe2==(u8)c ){ - assert( 0x80==(u8)z[i+1] ); - assert( 0xa8==(u8)z[i+2] || 0xa9==(u8)z[i+2] ); - i += 2; - continue; - }else{ - continue; - } - } /* end if( c=='\\' ) */ - zOut[j++] = c; - } /* end for() */ - zOut[j] = 0; - sqlite3_result_text(pCtx, zOut, j, sqlite3_free); - } - break; - } - case JSON_ARRAY: - case JSON_OBJECT: { - jsonReturnJson(pParse, pNode, pCtx, 0); - break; - } - } -} - -/* Forward reference */ -static int jsonParseAddNode(JsonParse*,u32,u32,const char*); - -/* -** A macro to hint to the compiler that a function should not be -** inlined. -*/ -#if defined(__GNUC__) -# define JSON_NOINLINE __attribute__((noinline)) -#elif defined(_MSC_VER) && _MSC_VER>=1310 -# define JSON_NOINLINE __declspec(noinline) -#else -# define JSON_NOINLINE -#endif - - -/* -** Add a single node to pParse->aNode after first expanding the -** size of the aNode array. Return the index of the new node. -** -** If an OOM error occurs, set pParse->oom and return -1. 
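
The deleted loop above is mostly \u-escape decoding: a 16-bit code unit (or a surrogate pair combined as 0x10000 + ((hi&0x3ff)<<10) + (lo&0x3ff), which the "(v&0xfc00)==0xd800" test guards) is re-encoded as UTF-8. The byte-level scheme, as a self-contained sketch; the new code still needs the same encoding when returning unescaped text values:

    /* Encode codepoint v as UTF-8; returns the number of bytes written. */
    static int utf8_encode(unsigned v, unsigned char out[4]){
      if( v<0x80 ){
        out[0] = (unsigned char)v;
        return 1;
      }else if( v<0x800 ){
        out[0] = 0xc0 | (v>>6);
        out[1] = 0x80 | (v&0x3f);
        return 2;
      }else if( v<0x10000 ){
        out[0] = 0xe0 | (v>>12);
        out[1] = 0x80 | ((v>>6)&0x3f);
        out[2] = 0x80 | (v&0x3f);
        return 3;
      }else{
        out[0] = 0xf0 | (v>>18);
        out[1] = 0x80 | ((v>>12)&0x3f);
        out[2] = 0x80 | ((v>>6)&0x3f);
        out[3] = 0x80 | (v&0x3f);
        return 4;
      }
    }
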
-*/ -static JSON_NOINLINE int jsonParseAddNodeExpand( - JsonParse *pParse, /* Append the node to this object */ - u32 eType, /* Node type */ - u32 n, /* Content size or sub-node count */ - const char *zContent /* Content */ -){ - u32 nNew; - JsonNode *pNew; - assert( pParse->nNode>=pParse->nAlloc ); - if( pParse->oom ) return -1; - nNew = pParse->nAlloc*2 + 10; - pNew = sqlite3_realloc64(pParse->aNode, sizeof(JsonNode)*nNew); - if( pNew==0 ){ - pParse->oom = 1; - return -1; - } - pParse->nAlloc = sqlite3_msize(pNew)/sizeof(JsonNode); - pParse->aNode = pNew; - assert( pParse->nNodenAlloc ); - return jsonParseAddNode(pParse, eType, n, zContent); -} - -/* -** Create a new JsonNode instance based on the arguments and append that -** instance to the JsonParse. Return the index in pParse->aNode[] of the -** new node, or -1 if a memory allocation fails. -*/ -static int jsonParseAddNode( - JsonParse *pParse, /* Append the node to this object */ - u32 eType, /* Node type */ - u32 n, /* Content size or sub-node count */ - const char *zContent /* Content */ -){ - JsonNode *p; - assert( pParse->aNode!=0 || pParse->nNode>=pParse->nAlloc ); - if( pParse->nNode>=pParse->nAlloc ){ - return jsonParseAddNodeExpand(pParse, eType, n, zContent); - } - assert( pParse->aNode!=0 ); - p = &pParse->aNode[pParse->nNode]; - assert( p!=0 ); - p->eType = (u8)(eType & 0xff); - p->jnFlags = (u8)(eType >> 8); - VVA( p->eU = zContent ? 1 : 0 ); - p->n = n; - p->u.zJContent = zContent; - return pParse->nNode++; -} - -/* -** Add an array of new nodes to the current pParse->aNode array. -** Return the index of the first node added. -** -** If an OOM error occurs, set pParse->oom. -*/ -static void jsonParseAddNodeArray( - JsonParse *pParse, /* Append the node to this object */ - JsonNode *aNode, /* Array of nodes to add */ - u32 nNode /* Number of elements in aNew */ -){ - assert( aNode!=0 ); - assert( nNode>=1 ); - if( pParse->nNode + nNode > pParse->nAlloc ){ - u32 nNew = pParse->nNode + nNode; - JsonNode *aNew = sqlite3_realloc64(pParse->aNode, nNew*sizeof(JsonNode)); - if( aNew==0 ){ - pParse->oom = 1; - return; - } - pParse->nAlloc = sqlite3_msize(aNew)/sizeof(JsonNode); - pParse->aNode = aNew; - } - memcpy(&pParse->aNode[pParse->nNode], aNode, nNode*sizeof(JsonNode)); - pParse->nNode += nNode; -} - -/* -** Add a new JSON_SUBST node. The node immediately following -** this new node will be the substitute content for iNode. 
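
Both the deleted jsonParseAddNodeExpand() above and the new jsonBlobExpand() that follows grow their target by doubling, which keeps the total copying cost linear over a long run of appends. The strategy in isolation (a hypothetical helper; the real routines also record OOM in pParse->oom):

    #include <stdlib.h>

    static int ensure_capacity(unsigned char **pa, size_t *pnAlloc,
                               size_t nNeed){
      size_t nNew;
      unsigned char *aNew;
      if( nNeed <= *pnAlloc ) return 0;
      nNew = *pnAlloc ? *pnAlloc*2 : 100;  /* 100 is jsonBlobExpand's seed */
      if( nNew < nNeed ) nNew = nNeed;     /* never less than requested */
      aNew = realloc(*pa, nNew);
      if( aNew==0 ) return -1;
      *pa = aNew;
      *pnAlloc = nNew;
      return 0;
    }
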
-*/ -static int jsonParseAddSubstNode( - JsonParse *pParse, /* Add the JSON_SUBST here */ - u32 iNode /* References this node */ -){ - int idx = jsonParseAddNode(pParse, JSON_SUBST, iNode, 0); - if( pParse->oom ) return -1; - pParse->aNode[iNode].jnFlags |= JNODE_REPLACE; - pParse->aNode[idx].eU = 4; - pParse->aNode[idx].u.iPrev = pParse->iSubst; - pParse->iSubst = idx; - pParse->hasMod = 1; - pParse->useMod = 1; - return idx; -} - /* ** Return true if z[] begins with 2 (or more) hexadecimal digits */ @@ -203875,63 +204220,500 @@ static const struct NanInfName { char *zMatch; char *zRepl; } aNanInfName[] = { - { 'i', 'I', 3, JSON_REAL, 7, "inf", "9.0e999" }, - { 'i', 'I', 8, JSON_REAL, 7, "infinity", "9.0e999" }, - { 'n', 'N', 3, JSON_NULL, 4, "NaN", "null" }, - { 'q', 'Q', 4, JSON_NULL, 4, "QNaN", "null" }, - { 's', 'S', 4, JSON_NULL, 4, "SNaN", "null" }, + { 'i', 'I', 3, JSONB_FLOAT, 7, "inf", "9.0e999" }, + { 'i', 'I', 8, JSONB_FLOAT, 7, "infinity", "9.0e999" }, + { 'n', 'N', 3, JSONB_NULL, 4, "NaN", "null" }, + { 'q', 'Q', 4, JSONB_NULL, 4, "QNaN", "null" }, + { 's', 'S', 4, JSONB_NULL, 4, "SNaN", "null" }, }; + /* -** Parse a single JSON value which begins at pParse->zJson[i]. Return the -** index of the first character past the end of the value parsed. +** Report the wrong number of arguments for json_insert(), json_replace() +** or json_set(). +*/ +static void jsonWrongNumArgs( + sqlite3_context *pCtx, + const char *zFuncName +){ + char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments", + zFuncName); + sqlite3_result_error(pCtx, zMsg, -1); + sqlite3_free(zMsg); +} + +/**************************************************************************** +** Utility routines for dealing with the binary BLOB representation of JSON +****************************************************************************/ + +/* +** Expand pParse->aBlob so that it holds at least N bytes. ** -** Special return values: +** Return the number of errors. +*/ +static int jsonBlobExpand(JsonParse *pParse, u32 N){ + u8 *aNew; + u32 t; + assert( N>pParse->nBlobAlloc ); + if( pParse->nBlobAlloc==0 ){ + t = 100; + }else{ + t = pParse->nBlobAlloc*2; + } + if( tdb, pParse->aBlob, t); + if( aNew==0 ){ pParse->oom = 1; return 1; } + pParse->aBlob = aNew; + pParse->nBlobAlloc = t; + return 0; +} + +/* +** If pParse->aBlob is not previously editable (because it is taken +** from sqlite3_value_blob(), as indicated by the fact that +** pParse->nBlobAlloc==0 and pParse->nBlob>0) then make it editable +** by making a copy into space obtained from malloc. +** +** Return true on success. Return false on OOM. +*/ +static int jsonBlobMakeEditable(JsonParse *pParse, u32 nExtra){ + u8 *aOld; + u32 nSize; + assert( !pParse->bReadOnly ); + if( pParse->oom ) return 0; + if( pParse->nBlobAlloc>0 ) return 1; + aOld = pParse->aBlob; + nSize = pParse->nBlob + nExtra; + pParse->aBlob = 0; + if( jsonBlobExpand(pParse, nSize) ){ + return 0; + } + assert( pParse->nBlobAlloc >= pParse->nBlob + nExtra ); + memcpy(pParse->aBlob, aOld, pParse->nBlob); + return 1; +} + +/* Expand pParse->aBlob and append one bytes. +*/ +static SQLITE_NOINLINE void jsonBlobExpandAndAppendOneByte( + JsonParse *pParse, + u8 c +){ + jsonBlobExpand(pParse, pParse->nBlob+1); + if( pParse->oom==0 ){ + assert( pParse->nBlob+1<=pParse->nBlobAlloc ); + pParse->aBlob[pParse->nBlob++] = c; + } +} + +/* Append a single character. 
+*/ +static void jsonBlobAppendOneByte(JsonParse *pParse, u8 c){ + if( pParse->nBlob >= pParse->nBlobAlloc ){ + jsonBlobExpandAndAppendOneByte(pParse, c); + }else{ + pParse->aBlob[pParse->nBlob++] = c; + } +} + +/* Slow version of jsonBlobAppendNode() that first resizes the +** pParse->aBlob structure. +*/ +static void jsonBlobAppendNode(JsonParse*,u8,u32,const void*); +static SQLITE_NOINLINE void jsonBlobExpandAndAppendNode( + JsonParse *pParse, + u8 eType, + u32 szPayload, + const void *aPayload +){ + if( jsonBlobExpand(pParse, pParse->nBlob+szPayload+9) ) return; + jsonBlobAppendNode(pParse, eType, szPayload, aPayload); +} + + +/* Append an node type byte together with the payload size and +** possibly also the payload. +** +** If aPayload is not NULL, then it is a pointer to the payload which +** is also appended. If aPayload is NULL, the pParse->aBlob[] array +** is resized (if necessary) so that it is big enough to hold the +** payload, but the payload is not appended and pParse->nBlob is left +** pointing to where the first byte of payload will eventually be. +*/ +static void jsonBlobAppendNode( + JsonParse *pParse, /* The JsonParse object under construction */ + u8 eType, /* Node type. One of JSONB_* */ + u32 szPayload, /* Number of bytes of payload */ + const void *aPayload /* The payload. Might be NULL */ +){ + u8 *a; + if( pParse->nBlob+szPayload+9 > pParse->nBlobAlloc ){ + jsonBlobExpandAndAppendNode(pParse,eType,szPayload,aPayload); + return; + } + assert( pParse->aBlob!=0 ); + a = &pParse->aBlob[pParse->nBlob]; + if( szPayload<=11 ){ + a[0] = eType | (szPayload<<4); + pParse->nBlob += 1; + }else if( szPayload<=0xff ){ + a[0] = eType | 0xc0; + a[1] = szPayload & 0xff; + pParse->nBlob += 2; + }else if( szPayload<=0xffff ){ + a[0] = eType | 0xd0; + a[1] = (szPayload >> 8) & 0xff; + a[2] = szPayload & 0xff; + pParse->nBlob += 3; + }else{ + a[0] = eType | 0xe0; + a[1] = (szPayload >> 24) & 0xff; + a[2] = (szPayload >> 16) & 0xff; + a[3] = (szPayload >> 8) & 0xff; + a[4] = szPayload & 0xff; + pParse->nBlob += 5; + } + if( aPayload ){ + pParse->nBlob += szPayload; + memcpy(&pParse->aBlob[pParse->nBlob-szPayload], aPayload, szPayload); + } +} + +/* Change the payload size for the node at index i to be szPayload. +*/ +static int jsonBlobChangePayloadSize( + JsonParse *pParse, + u32 i, + u32 szPayload +){ + u8 *a; + u8 szType; + u8 nExtra; + u8 nNeeded; + int delta; + if( pParse->oom ) return 0; + a = &pParse->aBlob[i]; + szType = a[0]>>4; + if( szType<=11 ){ + nExtra = 0; + }else if( szType==12 ){ + nExtra = 1; + }else if( szType==13 ){ + nExtra = 2; + }else{ + nExtra = 4; + } + if( szPayload<=11 ){ + nNeeded = 0; + }else if( szPayload<=0xff ){ + nNeeded = 1; + }else if( szPayload<=0xffff ){ + nNeeded = 2; + }else{ + nNeeded = 4; + } + delta = nNeeded - nExtra; + if( delta ){ + u32 newSize = pParse->nBlob + delta; + if( delta>0 ){ + if( newSize>pParse->nBlobAlloc && jsonBlobExpand(pParse, newSize) ){ + return 0; /* OOM error. Error state recorded in pParse->oom. 
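
jsonBlobAppendNode() above fixes the JSONB header encoding: payload sizes of 11 or less ride in the high nibble of the type byte; larger sizes set the nibble to 12, 13, or 14 and follow with 1, 2, or 4 big-endian size bytes. The same logic as a free-standing encoder sketch:

    /* Write a JSONB header for an element of type eType whose payload is
    ** sz bytes.  Returns the header length: 1, 2, 3, or 5 bytes. */
    static int jsonb_write_header(unsigned char *a, unsigned char eType,
                                  unsigned sz){
      if( sz<=11 ){
        a[0] = eType | (sz<<4);      /* size fits in the high nibble */
        return 1;
      }else if( sz<=0xff ){
        a[0] = eType | 0xc0;         /* nibble 12: one size byte follows */
        a[1] = sz & 0xff;
        return 2;
      }else if( sz<=0xffff ){
        a[0] = eType | 0xd0;         /* nibble 13: two size bytes follow */
        a[1] = (sz>>8) & 0xff;
        a[2] = sz & 0xff;
        return 3;
      }else{
        a[0] = eType | 0xe0;         /* nibble 14: four size bytes follow */
        a[1] = (sz>>24) & 0xff;
        a[2] = (sz>>16) & 0xff;
        a[3] = (sz>>8) & 0xff;
        a[4] = sz & 0xff;
        return 5;
      }
    }

So the two-character string "hi" becomes the three bytes 0x27 0x68 0x69: JSONB_TEXT (7) in the low nibble, size 2 in the high nibble, then the payload.
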
*/ + } + a = &pParse->aBlob[i]; + memmove(&a[1+delta], &a[1], pParse->nBlob - (i+1)); + }else{ + memmove(&a[1], &a[1-delta], pParse->nBlob - (i+1-delta)); + } + pParse->nBlob = newSize; + } + if( nNeeded==0 ){ + a[0] = (a[0] & 0x0f) | (szPayload<<4); + }else if( nNeeded==1 ){ + a[0] = (a[0] & 0x0f) | 0xc0; + a[1] = szPayload & 0xff; + }else if( nNeeded==2 ){ + a[0] = (a[0] & 0x0f) | 0xd0; + a[1] = (szPayload >> 8) & 0xff; + a[2] = szPayload & 0xff; + }else{ + a[0] = (a[0] & 0x0f) | 0xe0; + a[1] = (szPayload >> 24) & 0xff; + a[2] = (szPayload >> 16) & 0xff; + a[3] = (szPayload >> 8) & 0xff; + a[4] = szPayload & 0xff; + } + return delta; +} + +/* +** If z[0] is 'u' and is followed by exactly 4 hexadecimal character, +** then set *pOp to JSONB_TEXTJ and return true. If not, do not make +** any changes to *pOp and return false. +*/ +static int jsonIs4HexB(const char *z, int *pOp){ + if( z[0]!='u' ) return 0; + if( !jsonIs4Hex(&z[1]) ) return 0; + *pOp = JSONB_TEXTJ; + return 1; +} + +/* +** Check a single element of the JSONB in pParse for validity. +** +** The element to be checked starts at offset i and must end at on the +** last byte before iEnd. +** +** Return 0 if everything is correct. Return the 1-based byte offset of the +** error if a problem is detected. (In other words, if the error is at offset +** 0, return 1). +*/ +static u32 jsonbValidityCheck( + const JsonParse *pParse, /* Input JSONB. Only aBlob and nBlob are used */ + u32 i, /* Start of element as pParse->aBlob[i] */ + u32 iEnd, /* One more than the last byte of the element */ + u32 iDepth /* Current nesting depth */ +){ + u32 n, sz, j, k; + const u8 *z; + u8 x; + if( iDepth>JSON_MAX_DEPTH ) return i+1; + sz = 0; + n = jsonbPayloadSize(pParse, i, &sz); + if( NEVER(n==0) ) return i+1; /* Checked by caller */ + if( NEVER(i+n+sz!=iEnd) ) return i+1; /* Checked by caller */ + z = pParse->aBlob; + x = z[i] & 0x0f; + switch( x ){ + case JSONB_NULL: + case JSONB_TRUE: + case JSONB_FALSE: { + return n+sz==1 ? 0 : i+1; + } + case JSONB_INT: { + if( sz<1 ) return i+1; + j = i+n; + if( z[j]=='-' ){ + j++; + if( sz<2 ) return i+1; + } + k = i+n+sz; + while( jk ) return j+1; + if( z[j+1]!='.' 
&& z[j+1]!='e' && z[j+1]!='E' ) return j+1; + j++; + } + for(; j0 ) return j+1; + if( x==JSONB_FLOAT && (j==k-1 || !sqlite3Isdigit(z[j+1])) ){ + return j+1; + } + seen = 1; + continue; + } + if( z[j]=='e' || z[j]=='E' ){ + if( seen==2 ) return j+1; + if( j==k-1 ) return j+1; + if( z[j+1]=='+' || z[j+1]=='-' ){ + j++; + if( j==k-1 ) return j+1; + } + seen = 2; + continue; + } + return j+1; + } + if( seen==0 ) return i+1; + return 0; + } + case JSONB_TEXT: { + j = i+n; + k = j+sz; + while( j=k ){ + return j+1; + }else if( strchr("\"\\/bfnrt",z[j+1])!=0 ){ + j++; + }else if( z[j+1]=='u' ){ + if( j+5>=k ) return j+1; + if( !jsonIs4Hex((const char*)&z[j+2]) ) return j+1; + j++; + }else if( x!=JSONB_TEXT5 ){ + return j+1; + }else{ + u32 c = 0; + u32 szC = jsonUnescapeOneChar((const char*)&z[j], k-j, &c); + if( c==JSON_INVALID_CHAR ) return j+1; + j += szC - 1; + } + } + j++; + } + return 0; + } + case JSONB_TEXTRAW: { + return 0; + } + case JSONB_ARRAY: { + u32 sub; + j = i+n; + k = j+sz; + while( jk ) return j+1; + sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1); + if( sub ) return sub; + j += n + sz; + } + assert( j==k ); + return 0; + } + case JSONB_OBJECT: { + u32 cnt = 0; + u32 sub; + j = i+n; + k = j+sz; + while( jk ) return j+1; + if( (cnt & 1)==0 ){ + x = z[j] & 0x0f; + if( xJSONB_TEXTRAW ) return j+1; + } + sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1); + if( sub ) return sub; + cnt++; + j += n + sz; + } + assert( j==k ); + if( (cnt & 1)!=0 ) return j+1; + return 0; + } + default: { + return i+1; + } + } +} + +/* +** Translate a single element of JSON text at pParse->zJson[i] into +** its equivalent binary JSONB representation. Append the translation into +** pParse->aBlob[] beginning at pParse->nBlob. The size of +** pParse->aBlob[] is increased as necessary. 
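
jsonbValidityCheck() walks arrays and objects by repeatedly reading a child's header, bounds-checking it, recursing, and then skipping header plus payload; for objects it additionally requires children to alternate between text labels and values, so an odd child count means a dangling label. The traversal skeleton, reduced to the one-byte header form:

    /* Minimal stand-in for jsonbPayloadSize(): only the form where the
    ** payload size fits in the high nibble. */
    static unsigned header_size(const unsigned char *a, unsigned j,
                                unsigned *pSz){
      unsigned x = a[j]>>4;
      if( x<=11 ){ *pSz = x; return 1; }
      return 0;   /* multi-byte size forms omitted from this sketch */
    }

    /* Count the children of a container spanning a[j..k).  Returns
    ** (unsigned)-1 if the encoding is corrupt. */
    static unsigned count_children(const unsigned char *a, unsigned j,
                                   unsigned k){
      unsigned cnt = 0;
      while( j<k ){
        unsigned sz = 0;
        unsigned n = header_size(a, j, &sz);
        if( n==0 || j+n+sz>k ) return (unsigned)-1;
        /* a full checker recurses into a[j..j+n+sz) here */
        cnt++;
        j += n+sz;
      }
      return cnt;   /* for an object, odd means a label with no value */
    }
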
+** +** Return the index of the first character past the end of the element parsed, +** or one of the following special result codes: ** ** 0 End of input -** -1 Syntax error -** -2 '}' seen -** -3 ']' seen -** -4 ',' seen -** -5 ':' seen +** -1 Syntax error or OOM +** -2 '}' seen \ +** -3 ']' seen \___ For these returns, pParse->iErr is set to +** -4 ',' seen / the index in zJson[] of the seen character +** -5 ':' seen / */ -static int jsonParseValue(JsonParse *pParse, u32 i){ +static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){ char c; u32 j; - int iThis; + u32 iThis, iStart; int x; - JsonNode *pNode; + u8 t; const char *z = pParse->zJson; json_parse_restart: switch( (u8)z[i] ){ case '{': { /* Parse object */ - iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); - if( iThis<0 ) return -1; + iThis = pParse->nBlob; + jsonBlobAppendNode(pParse, JSONB_OBJECT, pParse->nJson-i, 0); if( ++pParse->iDepth > JSON_MAX_DEPTH ){ pParse->iErr = i; return -1; } + iStart = pParse->nBlob; for(j=i+1;;j++){ - u32 nNode = pParse->nNode; - x = jsonParseValue(pParse, j); + u32 iBlob = pParse->nBlob; + x = jsonTranslateTextToBlob(pParse, j); if( x<=0 ){ + int op; if( x==(-2) ){ j = pParse->iErr; - if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1; + if( pParse->nBlob!=(u32)iStart ) pParse->hasNonstd = 1; break; } j += json5Whitespace(&z[j]); + op = JSONB_TEXT; if( sqlite3JsonId1(z[j]) - || (z[j]=='\\' && z[j+1]=='u' && jsonIs4Hex(&z[j+2])) + || (z[j]=='\\' && jsonIs4HexB(&z[j+1], &op)) ){ int k = j+1; while( (sqlite3JsonId2(z[k]) && json5Whitespace(&z[k])==0) - || (z[k]=='\\' && z[k+1]=='u' && jsonIs4Hex(&z[k+2])) + || (z[k]=='\\' && jsonIs4HexB(&z[k+1], &op)) ){ k++; } - jsonParseAddNode(pParse, JSON_STRING | (JNODE_RAW<<8), k-j, &z[j]); + assert( iBlob==pParse->nBlob ); + jsonBlobAppendNode(pParse, op, k-j, &z[j]); pParse->hasNonstd = 1; x = k; }else{ @@ -203940,24 +204722,24 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ } } if( pParse->oom ) return -1; - pNode = &pParse->aNode[nNode]; - if( pNode->eType!=JSON_STRING ){ + t = pParse->aBlob[iBlob] & 0x0f; + if( tJSONB_TEXTRAW ){ pParse->iErr = j; return -1; } - pNode->jnFlags |= JNODE_LABEL; j = x; if( z[j]==':' ){ j++; }else{ - if( fast_isspace(z[j]) ){ - do{ j++; }while( fast_isspace(z[j]) ); + if( jsonIsspace(z[j]) ){ + /* strspn() is not helpful here */ + do{ j++; }while( jsonIsspace(z[j]) ); if( z[j]==':' ){ j++; goto parse_object_value; } } - x = jsonParseValue(pParse, j); + x = jsonTranslateTextToBlob(pParse, j); if( x!=(-5) ){ if( x!=(-1) ) pParse->iErr = j; return -1; @@ -203965,7 +204747,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ j = pParse->iErr+1; } parse_object_value: - x = jsonParseValue(pParse, j); + x = jsonTranslateTextToBlob(pParse, j); if( x<=0 ){ if( x!=(-1) ) pParse->iErr = j; return -1; @@ -203976,15 +204758,15 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ }else if( z[j]=='}' ){ break; }else{ - if( fast_isspace(z[j]) ){ - do{ j++; }while( fast_isspace(z[j]) ); + if( jsonIsspace(z[j]) ){ + j += 1 + (u32)strspn(&z[j+1], jsonSpaces); if( z[j]==',' ){ continue; }else if( z[j]=='}' ){ break; } } - x = jsonParseValue(pParse, j); + x = jsonTranslateTextToBlob(pParse, j); if( x==(-4) ){ j = pParse->iErr; continue; @@ -203997,25 +204779,26 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ pParse->iErr = j; return -1; } - pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1; + jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart); pParse->iDepth--; return j+1; } case '[': { /* 
Parse array */ - iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0); - if( iThis<0 ) return -1; + iThis = pParse->nBlob; + jsonBlobAppendNode(pParse, JSONB_ARRAY, pParse->nJson - i, 0); + iStart = pParse->nBlob; + if( pParse->oom ) return -1; if( ++pParse->iDepth > JSON_MAX_DEPTH ){ pParse->iErr = i; return -1; } - memset(&pParse->aNode[iThis].u, 0, sizeof(pParse->aNode[iThis].u)); for(j=i+1;;j++){ - x = jsonParseValue(pParse, j); + x = jsonTranslateTextToBlob(pParse, j); if( x<=0 ){ if( x==(-3) ){ j = pParse->iErr; - if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1; + if( pParse->nBlob!=iStart ) pParse->hasNonstd = 1; break; } if( x!=(-1) ) pParse->iErr = j; @@ -204027,15 +204810,15 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ }else if( z[j]==']' ){ break; }else{ - if( fast_isspace(z[j]) ){ - do{ j++; }while( fast_isspace(z[j]) ); + if( jsonIsspace(z[j]) ){ + j += 1 + (u32)strspn(&z[j+1], jsonSpaces); if( z[j]==',' ){ continue; }else if( z[j]==']' ){ break; } } - x = jsonParseValue(pParse, j); + x = jsonTranslateTextToBlob(pParse, j); if( x==(-4) ){ j = pParse->iErr; continue; @@ -204048,23 +204831,33 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ pParse->iErr = j; return -1; } - pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1; + jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart); pParse->iDepth--; return j+1; } case '\'': { - u8 jnFlags; + u8 opcode; char cDelim; pParse->hasNonstd = 1; - jnFlags = JNODE_JSON5; + opcode = JSONB_TEXT; goto parse_string; case '"': /* Parse string */ - jnFlags = 0; + opcode = JSONB_TEXT; parse_string: cDelim = z[i]; - for(j=i+1; 1; j++){ - if( jsonIsOk[(unsigned char)z[j]] ) continue; + j = i+1; + while( 1 /*exit-by-break*/ ){ + if( jsonIsOk[(u8)z[j]] ){ + if( !jsonIsOk[(u8)z[j+1]] ){ + j += 1; + }else if( !jsonIsOk[(u8)z[j+2]] ){ + j += 2; + }else{ + j += 3; + continue; + } + } c = z[j]; if( c==cDelim ){ break; @@ -204073,16 +204866,16 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f' || c=='n' || c=='r' || c=='t' || (c=='u' && jsonIs4Hex(&z[j+1])) ){ - jnFlags |= JNODE_ESCAPE; + if( opcode==JSONB_TEXT ) opcode = JSONB_TEXTJ; }else if( c=='\'' || c=='0' || c=='v' || c=='\n' || (0xe2==(u8)c && 0x80==(u8)z[j+1] && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2])) || (c=='x' && jsonIs2Hex(&z[j+1])) ){ - jnFlags |= (JNODE_ESCAPE|JNODE_JSON5); + opcode = JSONB_TEXT5; pParse->hasNonstd = 1; }else if( c=='\r' ){ if( z[j+1]=='\n' ) j++; - jnFlags |= (JNODE_ESCAPE|JNODE_JSON5); + opcode = JSONB_TEXT5; pParse->hasNonstd = 1; }else{ pParse->iErr = j; @@ -204092,14 +204885,17 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ /* Control characters are not allowed in strings */ pParse->iErr = j; return -1; + }else if( c=='"' ){ + opcode = JSONB_TEXT5; } + j++; } - jsonParseAddNode(pParse, JSON_STRING | (jnFlags<<8), j+1-i, &z[i]); + jsonBlobAppendNode(pParse, opcode, j-1-i, &z[i+1]); return j+1; } case 't': { if( strncmp(z+i,"true",4)==0 && !sqlite3Isalnum(z[i+4]) ){ - jsonParseAddNode(pParse, JSON_TRUE, 0, 0); + jsonBlobAppendOneByte(pParse, JSONB_TRUE); return i+4; } pParse->iErr = i; @@ -204107,23 +204903,22 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ } case 'f': { if( strncmp(z+i,"false",5)==0 && !sqlite3Isalnum(z[i+5]) ){ - jsonParseAddNode(pParse, JSON_FALSE, 0, 0); + jsonBlobAppendOneByte(pParse, JSONB_FALSE); return i+5; } pParse->iErr = i; return -1; } case '+': { - u8 seenDP, seenE, jnFlags; + u8 seenE; pParse->hasNonstd = 1; - jnFlags = 
JNODE_JSON5; + t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */ goto parse_number; case '.': if( sqlite3Isdigit(z[i+1]) ){ pParse->hasNonstd = 1; - jnFlags = JNODE_JSON5; + t = 0x03; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */ seenE = 0; - seenDP = JSON_REAL; goto parse_number_2; } pParse->iErr = i; @@ -204140,9 +204935,8 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ case '8': case '9': /* Parse number */ - jnFlags = 0; + t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */ parse_number: - seenDP = JSON_INT; seenE = 0; assert( '-' < '0' ); assert( '+' < '0' ); @@ -204152,9 +204946,9 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ if( c<='0' ){ if( c=='0' ){ if( (z[i+1]=='x' || z[i+1]=='X') && sqlite3Isxdigit(z[i+2]) ){ - assert( seenDP==JSON_INT ); + assert( t==0x00 ); pParse->hasNonstd = 1; - jnFlags |= JNODE_JSON5; + t = 0x01; for(j=i+3; sqlite3Isxdigit(z[j]); j++){} goto parse_number_finish; }else if( sqlite3Isdigit(z[i+1]) ){ @@ -204171,15 +204965,15 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ ){ pParse->hasNonstd = 1; if( z[i]=='-' ){ - jsonParseAddNode(pParse, JSON_REAL, 8, "-9.0e999"); + jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999"); }else{ - jsonParseAddNode(pParse, JSON_REAL, 7, "9.0e999"); + jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999"); } return i + (sqlite3StrNICmp(&z[i+4],"inity",5)==0 ? 9 : 4); } if( z[i+1]=='.' ){ pParse->hasNonstd = 1; - jnFlags |= JNODE_JSON5; + t |= 0x01; goto parse_number_2; } pParse->iErr = i; @@ -204191,30 +204985,31 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ return -1; }else if( (z[i+2]=='x' || z[i+2]=='X') && sqlite3Isxdigit(z[i+3]) ){ pParse->hasNonstd = 1; - jnFlags |= JNODE_JSON5; + t |= 0x01; for(j=i+4; sqlite3Isxdigit(z[j]); j++){} goto parse_number_finish; } } } } + parse_number_2: for(j=i+1;; j++){ c = z[j]; if( sqlite3Isdigit(c) ) continue; if( c=='.' 
){ - if( seenDP==JSON_REAL ){ + if( (t & 0x02)!=0 ){ pParse->iErr = j; return -1; } - seenDP = JSON_REAL; + t |= 0x02; continue; } if( c=='e' || c=='E' ){ if( z[j-1]<'0' ){ if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){ pParse->hasNonstd = 1; - jnFlags |= JNODE_JSON5; + t |= 0x01; }else{ pParse->iErr = j; return -1; @@ -204224,7 +205019,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ pParse->iErr = j; return -1; } - seenDP = JSON_REAL; + t |= 0x02; seenE = 1; c = z[j+1]; if( c=='+' || c=='-' ){ @@ -204242,14 +205037,18 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ if( z[j-1]<'0' ){ if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){ pParse->hasNonstd = 1; - jnFlags |= JNODE_JSON5; + t |= 0x01; }else{ pParse->iErr = j; return -1; } } parse_number_finish: - jsonParseAddNode(pParse, seenDP | (jnFlags<<8), j - i, &z[i]); + assert( JSONB_INT+0x01==JSONB_INT5 ); + assert( JSONB_FLOAT+0x01==JSONB_FLOAT5 ); + assert( JSONB_INT+0x02==JSONB_FLOAT ); + if( z[i]=='+' ) i++; + jsonBlobAppendNode(pParse, JSONB_INT+t, j-i, &z[i]); return j; } case '}': { @@ -204275,9 +205074,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ case 0x0a: case 0x0d: case 0x20: { - do{ - i++; - }while( fast_isspace(z[i]) ); + i += 1 + (u32)strspn(&z[i+1], jsonSpaces); goto json_parse_restart; } case 0x0b: @@ -204299,7 +205096,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ } case 'n': { if( strncmp(z+i,"null",4)==0 && !sqlite3Isalnum(z[i+4]) ){ - jsonParseAddNode(pParse, JSON_NULL, 0, 0); + jsonBlobAppendOneByte(pParse, JSONB_NULL); return i+4; } /* fall-through into the default case that checks for NaN */ @@ -204315,8 +205112,11 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ continue; } if( sqlite3Isalnum(z[i+nn]) ) continue; - jsonParseAddNode(pParse, aNanInfName[k].eType, - aNanInfName[k].nRepl, aNanInfName[k].zRepl); + if( aNanInfName[k].eType==JSONB_FLOAT ){ + jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999"); + }else{ + jsonBlobAppendOneByte(pParse, JSONB_NULL); + } pParse->hasNonstd = 1; return i + nn; } @@ -204326,6 +205126,7 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ } /* End switch(z[i]) */ } + /* ** Parse a complete JSON string. Return 0 on success or non-zero if there ** are any errors. If an error occurs, free all memory held by pParse, @@ -204334,20 +205135,26 @@ static int jsonParseValue(JsonParse *pParse, u32 i){ ** pParse must be initialized to an empty parse object prior to calling ** this routine. */ -static int jsonParse( +static int jsonConvertTextToBlob( JsonParse *pParse, /* Initialize and fill this JsonParse object */ sqlite3_context *pCtx /* Report errors here */ ){ int i; const char *zJson = pParse->zJson; - i = jsonParseValue(pParse, 0); + i = jsonTranslateTextToBlob(pParse, 0); if( pParse->oom ) i = -1; if( i>0 ){ +#ifdef SQLITE_DEBUG assert( pParse->iDepth==0 ); - while( fast_isspace(zJson[i]) ) i++; + if( sqlite3Config.bJsonSelfcheck ){ + assert( jsonbValidityCheck(pParse, 0, pParse->nBlob, 0)==0 ); + } +#endif + while( jsonIsspace(zJson[i]) ) i++; if( zJson[i] ){ i += json5Whitespace(&zJson[i]); if( zJson[i] ){ + if( pCtx ) sqlite3_result_error(pCtx, "malformed JSON", -1); jsonParseReset(pParse); return 1; } @@ -204368,248 +205175,715 @@ static int jsonParse( return 0; } +/* +** The input string pStr is a well-formed JSON text string. Convert +** this into the JSONB format and make it the return value of the +** SQL function. 
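
The parse_number_finish code above leans on the deliberate layout of the type codes: starting from JSONB_INT, adding bit 0x01 (a JSON5-only spelling was seen) and bit 0x02 (a fraction or exponent was seen) lands on exactly the right code, which is what the three asserts verify. Spelled out:

    #include <assert.h>

    /* Local copies of the JSONB codes defined earlier in this file. */
    enum { B_INT=3, B_INT5=4, B_FLOAT=5, B_FLOAT5=6 };

    int main(void){
      assert( B_INT + 0x00 == B_INT );    /* "42"   : canonical integer */
      assert( B_INT + 0x01 == B_INT5 );   /* "0x2a" : JSON5 integer     */
      assert( B_INT + 0x02 == B_FLOAT );  /* "4.2"  : canonical real    */
      assert( B_INT + 0x03 == B_FLOAT5 ); /* ".5"   : JSON5 real        */
      return 0;
    }
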
+*/ +static void jsonReturnStringAsBlob(JsonString *pStr){ + JsonParse px; + memset(&px, 0, sizeof(px)); + jsonStringTerminate(pStr); + px.zJson = pStr->zBuf; + px.nJson = pStr->nUsed; + px.db = sqlite3_context_db_handle(pStr->pCtx); + (void)jsonTranslateTextToBlob(&px, 0); + if( px.oom ){ + sqlite3DbFree(px.db, px.aBlob); + sqlite3_result_error_nomem(pStr->pCtx); + }else{ + assert( px.nBlobAlloc>0 ); + assert( !px.bReadOnly ); + sqlite3_result_blob(pStr->pCtx, px.aBlob, px.nBlob, SQLITE_DYNAMIC); + } +} -/* Mark node i of pParse as being a child of iParent. Call recursively -** to fill in all the descendants of node i. +/* The byte at index i is a node type-code. This routine +** determines the payload size for that node and writes that +** payload size in to *pSz. It returns the offset from i to the +** beginning of the payload. Return 0 on error. */ -static void jsonParseFillInParentage(JsonParse *pParse, u32 i, u32 iParent){ - JsonNode *pNode = &pParse->aNode[i]; - u32 j; - pParse->aUp[i] = iParent; - switch( pNode->eType ){ - case JSON_ARRAY: { - for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j)){ - jsonParseFillInParentage(pParse, i+j, i); +static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){ + u8 x; + u32 sz; + u32 n; + if( NEVER(i>pParse->nBlob) ){ + *pSz = 0; + return 0; + } + x = pParse->aBlob[i]>>4; + if( x<=11 ){ + sz = x; + n = 1; + }else if( x==12 ){ + if( i+1>=pParse->nBlob ){ + *pSz = 0; + return 0; + } + sz = pParse->aBlob[i+1]; + n = 2; + }else if( x==13 ){ + if( i+2>=pParse->nBlob ){ + *pSz = 0; + return 0; + } + sz = (pParse->aBlob[i+1]<<8) + pParse->aBlob[i+2]; + n = 3; + }else if( x==14 ){ + if( i+4>=pParse->nBlob ){ + *pSz = 0; + return 0; + } + sz = ((u32)pParse->aBlob[i+1]<<24) + (pParse->aBlob[i+2]<<16) + + (pParse->aBlob[i+3]<<8) + pParse->aBlob[i+4]; + n = 5; + }else{ + if( i+8>=pParse->nBlob + || pParse->aBlob[i+1]!=0 + || pParse->aBlob[i+2]!=0 + || pParse->aBlob[i+3]!=0 + || pParse->aBlob[i+4]!=0 + ){ + *pSz = 0; + return 0; + } + sz = (pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) + + (pParse->aBlob[i+7]<<8) + pParse->aBlob[i+8]; + n = 9; + } + if( (i64)i+sz+n > pParse->nBlob + && (i64)i+sz+n > pParse->nBlob-pParse->delta + ){ + sz = 0; + n = 0; + } + *pSz = sz; + return n; +} + + +/* +** Translate the binary JSONB representation of JSON beginning at +** pParse->aBlob[i] into a JSON text string. Append the JSON +** text onto the end of pOut. Return the index in pParse->aBlob[] +** of the first byte past the end of the element that is translated. +** +** If an error is detected in the BLOB input, the pOut->eErr flag +** might get set to JSTRING_MALFORMED. But not all BLOB input errors +** are detected. So a malformed JSONB input might either result +** in an error, or in incorrect JSON. +** +** The pOut->eErr JSTRING_OOM flag is set on a OOM. 
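Aside: jsonbPayloadSize() above decodes a variable-length node header in which the upper nibble of the first byte either carries the payload size directly (0..11) or selects 1, 2, 4, or 8 big-endian size bytes (tags 12..15). A hypothetical inverse, not part of the patch, covering the sizes a u32 can express:

#include <assert.h>

typedef unsigned char u8;
typedef unsigned int u32;

/* Write the JSONB header for a node of type eType with a payload of
** sz bytes into a[]; return the header length in bytes. */
static int jsonbEncodeHeader(u8 *a, u8 eType, u32 sz){
  if( sz<=11 ){
    a[0] = (u8)(eType | (sz<<4));   /* size fits in the upper nibble */
    return 1;
  }
  if( sz<=0xff ){
    a[0] = eType | 0xc0;            /* tag 12: one size byte follows */
    a[1] = (u8)sz;
    return 2;
  }
  if( sz<=0xffff ){
    a[0] = eType | 0xd0;            /* tag 13: two size bytes follow */
    a[1] = (u8)(sz>>8);
    a[2] = (u8)(sz&0xff);
    return 3;
  }
  a[0] = eType | 0xe0;              /* tag 14: four size bytes follow */
  a[1] = (u8)(sz>>24);
  a[2] = (u8)((sz>>16)&0xff);
  a[3] = (u8)((sz>>8)&0xff);
  a[4] = (u8)(sz&0xff);
  return 5;
}

int main(void){
  u8 a[5];
  assert( jsonbEncodeHeader(a, 0x07, 2)==1 && a[0]==0x27 );
  assert( jsonbEncodeHeader(a, 0x07, 300)==3 && a[0]==0xd7 );
  return 0;
}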
+*/ +static u32 jsonTranslateBlobToText( + const JsonParse *pParse, /* the complete parse of the JSON */ + u32 i, /* Start rendering at this index */ + JsonString *pOut /* Write JSON here */ +){ + u32 sz, n, j, iEnd; + + n = jsonbPayloadSize(pParse, i, &sz); + if( n==0 ){ + pOut->eErr |= JSTRING_MALFORMED; + return pParse->nBlob+1; + } + switch( pParse->aBlob[i] & 0x0f ){ + case JSONB_NULL: { + jsonAppendRawNZ(pOut, "null", 4); + return i+1; + } + case JSONB_TRUE: { + jsonAppendRawNZ(pOut, "true", 4); + return i+1; + } + case JSONB_FALSE: { + jsonAppendRawNZ(pOut, "false", 5); + return i+1; + } + case JSONB_INT: + case JSONB_FLOAT: { + if( sz==0 ) goto malformed_jsonb; + jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz); + break; + } + case JSONB_INT5: { /* Integer literal in hexadecimal notation */ + u32 k = 2; + sqlite3_uint64 u = 0; + const char *zIn = (const char*)&pParse->aBlob[i+n]; + int bOverflow = 0; + if( sz==0 ) goto malformed_jsonb; + if( zIn[0]=='-' ){ + jsonAppendChar(pOut, '-'); + k++; + }else if( zIn[0]=='+' ){ + k++; + } + for(; keErr |= JSTRING_MALFORMED; + break; + }else if( (u>>60)!=0 ){ + bOverflow = 1; + }else{ + u = u*16 + sqlite3HexToInt(zIn[k]); + } + } + jsonPrintf(100,pOut,bOverflow?"9.0e999":"%llu", u); + break; + } + case JSONB_FLOAT5: { /* Float literal missing digits beside "." */ + u32 k = 0; + const char *zIn = (const char*)&pParse->aBlob[i+n]; + if( sz==0 ) goto malformed_jsonb; + if( zIn[0]=='-' ){ + jsonAppendChar(pOut, '-'); + k++; + } + if( zIn[k]=='.' ){ + jsonAppendChar(pOut, '0'); + } + for(; kn; j += jsonNodeSize(pNode+j+1)+1){ - pParse->aUp[i+j] = i; - jsonParseFillInParentage(pParse, i+j+1, i); + case JSONB_TEXT: + case JSONB_TEXTJ: { + jsonAppendChar(pOut, '"'); + jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz); + jsonAppendChar(pOut, '"'); + break; + } + case JSONB_TEXT5: { + const char *zIn; + u32 k; + u32 sz2 = sz; + zIn = (const char*)&pParse->aBlob[i+n]; + jsonAppendChar(pOut, '"'); + while( sz2>0 ){ + for(k=0; k0 ){ + jsonAppendRawNZ(pOut, zIn, k); + if( k>=sz2 ){ + break; + } + zIn += k; + sz2 -= k; + } + if( zIn[0]=='"' ){ + jsonAppendRawNZ(pOut, "\\\"", 2); + zIn++; + sz2--; + continue; + } + assert( zIn[0]=='\\' ); + assert( sz2>=1 ); + if( sz2<2 ){ + pOut->eErr |= JSTRING_MALFORMED; + break; + } + switch( (u8)zIn[1] ){ + case '\'': + jsonAppendChar(pOut, '\''); + break; + case 'v': + jsonAppendRawNZ(pOut, "\\u0009", 6); + break; + case 'x': + if( sz2<4 ){ + pOut->eErr |= JSTRING_MALFORMED; + sz2 = 2; + break; + } + jsonAppendRawNZ(pOut, "\\u00", 4); + jsonAppendRawNZ(pOut, &zIn[2], 2); + zIn += 2; + sz2 -= 2; + break; + case '0': + jsonAppendRawNZ(pOut, "\\u0000", 6); + break; + case '\r': + if( sz2>2 && zIn[2]=='\n' ){ + zIn++; + sz2--; + } + break; + case '\n': + break; + case 0xe2: + /* '\' followed by either U+2028 or U+2029 is ignored as + ** whitespace. Not that in UTF8, U+2028 is 0xe2 0x80 0x29. 
+ ** U+2029 is the same except for the last byte */ + if( sz2<4 + || 0x80!=(u8)zIn[2] + || (0xa8!=(u8)zIn[3] && 0xa9!=(u8)zIn[3]) + ){ + pOut->eErr |= JSTRING_MALFORMED; + sz2 = 2; + break; + } + zIn += 2; + sz2 -= 2; + break; + default: + jsonAppendRawNZ(pOut, zIn, 2); + break; + } + assert( sz2>=2 ); + zIn += 2; + sz2 -= 2; } + jsonAppendChar(pOut, '"'); + break; + } + case JSONB_TEXTRAW: { + jsonAppendString(pOut, (const char*)&pParse->aBlob[i+n], sz); + break; + } + case JSONB_ARRAY: { + jsonAppendChar(pOut, '['); + j = i+n; + iEnd = j+sz; + while( jeErr==0 ){ + j = jsonTranslateBlobToText(pParse, j, pOut); + jsonAppendChar(pOut, ','); + } + if( j>iEnd ) pOut->eErr |= JSTRING_MALFORMED; + if( sz>0 ) jsonStringTrimOneChar(pOut); + jsonAppendChar(pOut, ']'); + break; + } + case JSONB_OBJECT: { + int x = 0; + jsonAppendChar(pOut, '{'); + j = i+n; + iEnd = j+sz; + while( jeErr==0 ){ + j = jsonTranslateBlobToText(pParse, j, pOut); + jsonAppendChar(pOut, (x++ & 1) ? ',' : ':'); + } + if( (x & 1)!=0 || j>iEnd ) pOut->eErr |= JSTRING_MALFORMED; + if( sz>0 ) jsonStringTrimOneChar(pOut); + jsonAppendChar(pOut, '}'); break; } + default: { + malformed_jsonb: + pOut->eErr |= JSTRING_MALFORMED; break; } } + return i+n+sz; +} + +/* Return true if the input pJson +** +** For performance reasons, this routine does not do a detailed check of the +** input BLOB to ensure that it is well-formed. Hence, false positives are +** possible. False negatives should never occur, however. +*/ +static int jsonFuncArgMightBeBinary(sqlite3_value *pJson){ + u32 sz, n; + const u8 *aBlob; + int nBlob; + JsonParse s; + if( sqlite3_value_type(pJson)!=SQLITE_BLOB ) return 0; + aBlob = sqlite3_value_blob(pJson); + nBlob = sqlite3_value_bytes(pJson); + if( nBlob<1 ) return 0; + if( NEVER(aBlob==0) || (aBlob[0] & 0x0f)>JSONB_OBJECT ) return 0; + memset(&s, 0, sizeof(s)); + s.aBlob = (u8*)aBlob; + s.nBlob = nBlob; + n = jsonbPayloadSize(&s, 0, &sz); + if( n==0 ) return 0; + if( sz+n!=(u32)nBlob ) return 0; + if( (aBlob[0] & 0x0f)<=JSONB_FALSE && sz>0 ) return 0; + return sz+n==(u32)nBlob; } /* -** Compute the parentage of all nodes in a completed parse. +** Given that a JSONB_ARRAY object starts at offset i, return +** the number of entries in that array. */ -static int jsonParseFindParents(JsonParse *pParse){ - u32 *aUp; - assert( pParse->aUp==0 ); - aUp = pParse->aUp = sqlite3_malloc64( sizeof(u32)*pParse->nNode ); - if( aUp==0 ){ - pParse->oom = 1; - return SQLITE_NOMEM; +static u32 jsonbArrayCount(JsonParse *pParse, u32 iRoot){ + u32 n, sz, i, iEnd; + u32 k = 0; + n = jsonbPayloadSize(pParse, iRoot, &sz); + iEnd = iRoot+n+sz; + for(i=iRoot+n; n>0 && idelta. */ -#define JSON_CACHE_ID (-429938) /* First cache entry */ -#define JSON_CACHE_SZ 4 /* Max number of cache entries */ +static void jsonAfterEditSizeAdjust(JsonParse *pParse, u32 iRoot){ + u32 sz = 0; + u32 nBlob; + assert( pParse->delta!=0 ); + assert( pParse->nBlobAlloc >= pParse->nBlob ); + nBlob = pParse->nBlob; + pParse->nBlob = pParse->nBlobAlloc; + (void)jsonbPayloadSize(pParse, iRoot, &sz); + pParse->nBlob = nBlob; + sz += pParse->delta; + pParse->delta += jsonBlobChangePayloadSize(pParse, iRoot, sz); +} /* -** Obtain a complete parse of the JSON found in the pJson argument +** Modify the JSONB blob at pParse->aBlob by removing nDel bytes of +** content beginning at iDel, and replacing them with nIns bytes of +** content given by aIns. ** -** Use the sqlite3_get_auxdata() cache to find a preexisting parse -** if it is available. 
If the cache is not available or if it -** is no longer valid, parse the JSON again and return the new parse. -** Also register the new parse so that it will be available for -** future sqlite3_get_auxdata() calls. +** nDel may be zero, in which case no bytes are removed. But iDel is +** still important as new bytes will be insert beginning at iDel. ** -** If an error occurs and pErrCtx!=0 then report the error on pErrCtx -** and return NULL. -** -** The returned pointer (if it is not NULL) is owned by the cache in -** most cases, not the caller. The caller does NOT need to invoke -** jsonParseFree(), in most cases. -** -** Except, if an error occurs and pErrCtx==0 then return the JsonParse -** object with JsonParse.nErr non-zero and the caller will own the JsonParse -** object. In that case, it will be the responsibility of the caller to -** invoke jsonParseFree(). To summarize: +** aIns may be zero, in which case space is created to hold nIns bytes +** beginning at iDel, but that space is uninitialized. ** -** pErrCtx!=0 || p->nErr==0 ==> Return value p is owned by the -** cache. Call does not need to -** free it. -** -** pErrCtx==0 && p->nErr!=0 ==> Return value is owned by the caller -** and so the caller must free it. +** Set pParse->oom if an OOM occurs. */ -static JsonParse *jsonParseCached( - sqlite3_context *pCtx, /* Context to use for cache search */ - sqlite3_value *pJson, /* Function param containing JSON text */ - sqlite3_context *pErrCtx, /* Write parse errors here if not NULL */ - int bUnedited /* No prior edits allowed */ +static void jsonBlobEdit( + JsonParse *pParse, /* The JSONB to be modified is in pParse->aBlob */ + u32 iDel, /* First byte to be removed */ + u32 nDel, /* Number of bytes to remove */ + const u8 *aIns, /* Content to insert */ + u32 nIns /* Bytes of content to insert */ ){ - char *zJson = (char*)sqlite3_value_text(pJson); - int nJson = sqlite3_value_bytes(pJson); - JsonParse *p; - JsonParse *pMatch = 0; - int iKey; - int iMinKey = 0; - u32 iMinHold = 0xffffffff; - u32 iMaxHold = 0; - int bJsonRCStr; + i64 d = (i64)nIns - (i64)nDel; + if( d!=0 ){ + if( pParse->nBlob + d > pParse->nBlobAlloc ){ + jsonBlobExpand(pParse, pParse->nBlob+d); + if( pParse->oom ) return; + } + memmove(&pParse->aBlob[iDel+nIns], + &pParse->aBlob[iDel+nDel], + pParse->nBlob - (iDel+nDel)); + pParse->nBlob += d; + pParse->delta += d; + } + if( nIns && aIns ) memcpy(&pParse->aBlob[iDel], aIns, nIns); +} - if( zJson==0 ) return 0; - for(iKey=0; iKeynJson==nJson - && (p->hasMod==0 || bUnedited==0) - && (p->zJson==zJson || memcmp(p->zJson,zJson,nJson)==0) - ){ - p->nErr = 0; - p->useMod = 0; - pMatch = p; - }else - if( pMatch==0 - && p->zAlt!=0 - && bUnedited==0 - && p->nAlt==nJson - && memcmp(p->zAlt, zJson, nJson)==0 - ){ - p->nErr = 0; - p->useMod = 1; - pMatch = p; - }else if( p->iHoldiHold; - iMinKey = iKey; + if( z[i+1]=='\r' ){ + if( i+2iHold>iMaxHold ){ - iMaxHold = p->iHold; + if( 0xe2==(u8)z[i+1] + && i+3nErr = 0; - pMatch->iHold = iMaxHold+1; - assert( pMatch->nJPRef>0 ); /* pMatch is owned by the cache */ - return pMatch; - } + return i; +} - /* The input JSON was not found anywhere in the cache. We will need - ** to parse it ourselves and generate a new JsonParse object. - */ - bJsonRCStr = sqlite3ValueIsOfClass(pJson,sqlite3RCStrUnref); - p = sqlite3_malloc64( sizeof(*p) + (bJsonRCStr ? 0 : nJson+1) ); - if( p==0 ){ - sqlite3_result_error_nomem(pCtx); - return 0; +/* +** Input z[0..n] defines JSON escape sequence including the leading '\\'. 
+** Decode that escape sequence into a single character. Write that +** character into *piOut. Return the number of bytes in the escape sequence. +** +** If there is a syntax error of some kind (for example too few characters +** after the '\\' to complete the encoding) then *piOut is set to +** JSON_INVALID_CHAR. +*/ +static u32 jsonUnescapeOneChar(const char *z, u32 n, u32 *piOut){ + assert( n>0 ); + assert( z[0]=='\\' ); + if( n<2 ){ + *piOut = JSON_INVALID_CHAR; + return n; } - memset(p, 0, sizeof(*p)); - if( bJsonRCStr ){ - p->zJson = sqlite3RCStrRef(zJson); - p->bJsonIsRCStr = 1; - }else{ - p->zJson = (char*)&p[1]; - memcpy(p->zJson, zJson, nJson+1); + switch( (u8)z[1] ){ + case 'u': { + u32 v, vlo; + if( n<6 ){ + *piOut = JSON_INVALID_CHAR; + return n; + } + v = jsonHexToInt4(&z[2]); + if( (v & 0xfc00)==0xd800 + && n>=12 + && z[6]=='\\' + && z[7]=='u' + && ((vlo = jsonHexToInt4(&z[8]))&0xfc00)==0xdc00 + ){ + *piOut = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000; + return 12; + }else{ + *piOut = v; + return 6; + } + } + case 'b': { *piOut = '\b'; return 2; } + case 'f': { *piOut = '\f'; return 2; } + case 'n': { *piOut = '\n'; return 2; } + case 'r': { *piOut = '\r'; return 2; } + case 't': { *piOut = '\t'; return 2; } + case 'v': { *piOut = '\v'; return 2; } + case '0': { *piOut = 0; return 2; } + case '\'': + case '"': + case '/': + case '\\':{ *piOut = z[1]; return 2; } + case 'x': { + if( n<4 ){ + *piOut = JSON_INVALID_CHAR; + return n; + } + *piOut = (jsonHexToInt(z[2])<<4) | jsonHexToInt(z[3]); + return 4; + } + case 0xe2: + case '\r': + case '\n': { + u32 nSkip = jsonBytesToBypass(z, n); + if( nSkip==0 ){ + *piOut = JSON_INVALID_CHAR; + return n; + }else if( nSkip==n ){ + *piOut = 0; + return n; + }else if( z[nSkip]=='\\' ){ + return nSkip + jsonUnescapeOneChar(&z[nSkip], n-nSkip, piOut); + }else{ + int sz = sqlite3Utf8ReadLimited((u8*)&z[nSkip], n-nSkip, piOut); + return nSkip + sz; + } + } + default: { + *piOut = JSON_INVALID_CHAR; + return 2; + } } - p->nJPRef = 1; - if( jsonParse(p, pErrCtx) ){ - if( pErrCtx==0 ){ - p->nErr = 1; - assert( p->nJPRef==1 ); /* Caller will own the new JsonParse object p */ - return p; +} + + +/* +** Compare two object labels. Return 1 if they are equal and +** 0 if they differ. +** +** In this version, we know that one or the other or both of the +** two comparands contains an escape sequence. 
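A quick standalone check of the surrogate-pair arithmetic used in jsonUnescapeOneChar() above (not part of the patch): the escape "\uD834\uDD1E" must combine into U+1D11E.

#include <assert.h>

int main(void){
  unsigned v = 0xD834, vlo = 0xDD1E;
  assert( (v & 0xfc00)==0xd800 );    /* high surrogate */
  assert( (vlo & 0xfc00)==0xdc00 );  /* low surrogate */
  /* same combining formula as the 'u' case above */
  assert( ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000 == 0x1D11E );
  return 0;
}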
+*/ +static SQLITE_NOINLINE int jsonLabelCompareEscaped( + const char *zLeft, /* The left label */ + u32 nLeft, /* Size of the left label in bytes */ + int rawLeft, /* True if zLeft contains no escapes */ + const char *zRight, /* The right label */ + u32 nRight, /* Size of the right label in bytes */ + int rawRight /* True if zRight is escape-free */ +){ + u32 cLeft, cRight; + assert( rawLeft==0 || rawRight==0 ); + while( 1 /*exit-by-return*/ ){ + if( nLeft==0 ){ + cLeft = 0; + }else if( rawLeft || zLeft[0]!='\\' ){ + cLeft = ((u8*)zLeft)[0]; + if( cLeft>=0xc0 ){ + int sz = sqlite3Utf8ReadLimited((u8*)zLeft, nLeft, &cLeft); + zLeft += sz; + nLeft -= sz; + }else{ + zLeft++; + nLeft--; + } + }else{ + u32 n = jsonUnescapeOneChar(zLeft, nLeft, &cLeft); + zLeft += n; + assert( n<=nLeft ); + nLeft -= n; + } + if( nRight==0 ){ + cRight = 0; + }else if( rawRight || zRight[0]!='\\' ){ + cRight = ((u8*)zRight)[0]; + if( cRight>=0xc0 ){ + int sz = sqlite3Utf8ReadLimited((u8*)zRight, nRight, &cRight); + zRight += sz; + nRight -= sz; + }else{ + zRight++; + nRight--; + } + }else{ + u32 n = jsonUnescapeOneChar(zRight, nRight, &cRight); + zRight += n; + assert( n<=nRight ); + nRight -= n; } - jsonParseFree(p); - return 0; + if( cLeft!=cRight ) return 0; + if( cLeft==0 ) return 1; } - p->nJson = nJson; - p->iHold = iMaxHold+1; - /* Transfer ownership of the new JsonParse to the cache */ - sqlite3_set_auxdata(pCtx, JSON_CACHE_ID+iMinKey, p, - (void(*)(void*))jsonParseFree); - return (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iMinKey); } /* -** Compare the OBJECT label at pNode against zKey,nKey. Return true on -** a match. +** Compare two object labels. Return 1 if they are equal and +** 0 if they differ. Return -1 if an OOM occurs. */ -static int jsonLabelCompare(const JsonNode *pNode, const char *zKey, u32 nKey){ - assert( pNode->eU==1 ); - if( pNode->jnFlags & JNODE_RAW ){ - if( pNode->n!=nKey ) return 0; - return strncmp(pNode->u.zJContent, zKey, nKey)==0; +static int jsonLabelCompare( + const char *zLeft, /* The left label */ + u32 nLeft, /* Size of the left label in bytes */ + int rawLeft, /* True if zLeft contains no escapes */ + const char *zRight, /* The right label */ + u32 nRight, /* Size of the right label in bytes */ + int rawRight /* True if zRight is escape-free */ +){ + if( rawLeft && rawRight ){ + /* Simpliest case: Neither label contains escapes. A simple + ** memcmp() is sufficient. */ + if( nLeft!=nRight ) return 0; + return memcmp(zLeft, zRight, nLeft)==0; }else{ - if( pNode->n!=nKey+2 ) return 0; - return strncmp(pNode->u.zJContent+1, zKey, nKey)==0; + return jsonLabelCompareEscaped(zLeft, nLeft, rawLeft, + zRight, nRight, rawRight); } } -static int jsonSameLabel(const JsonNode *p1, const JsonNode *p2){ - if( p1->jnFlags & JNODE_RAW ){ - return jsonLabelCompare(p2, p1->u.zJContent, p1->n); - }else if( p2->jnFlags & JNODE_RAW ){ - return jsonLabelCompare(p1, p2->u.zJContent, p2->n); + +/* +** Error returns from jsonLookupStep() +*/ +#define JSON_LOOKUP_ERROR 0xffffffff +#define JSON_LOOKUP_NOTFOUND 0xfffffffe +#define JSON_LOOKUP_PATHERROR 0xfffffffd +#define JSON_LOOKUP_ISERROR(x) ((x)>=JSON_LOOKUP_PATHERROR) + +/* Forward declaration */ +static u32 jsonLookupStep(JsonParse*,u32,const char*,u32); + + +/* This helper routine for jsonLookupStep() populates pIns with +** binary data that is to be inserted into pParse. +** +** In the common case, pIns just points to pParse->aIns and pParse->nIns. 
+** But if the zPath of the original edit operation includes path elements
+** that go deeper, additional substructure must be created.
+**
+** For example:
+**
+**     json_insert('{}', '$.a.b.c', 123);
+**
+** The search stops at '$.a'.  But additional substructure must be
+** created for the ".b.c" part of the patch so that the final result
+** is: {"a":{"b":{"c":123}}}.  This routine populates pIns with
+** the binary equivalent of {"b":{"c":123}} so that it can be inserted.
+**
+** The caller is responsible for resetting pIns when it has finished
+** using the substructure.
+*/
+static u32 jsonCreateEditSubstructure(
+  JsonParse *pParse,  /* The original JSONB that is being edited */
+  JsonParse *pIns,    /* Populate this with the blob data to insert */
+  const char *zTail   /* Tail of the path that determines substructure */
+){
+  static const u8 emptyObject[] = { JSONB_ARRAY, JSONB_OBJECT };
+  int rc;
+  memset(pIns, 0, sizeof(*pIns));
+  pIns->db = pParse->db;
+  if( zTail[0]==0 ){
+    /* No substructure.  Just insert what is given in pParse. */
+    pIns->aBlob = pParse->aIns;
+    pIns->nBlob = pParse->nIns;
+    rc = 0;
   }else{
-    return p1->n==p2->n && strncmp(p1->u.zJContent,p2->u.zJContent,p1->n)==0;
+    /* Construct the binary substructure */
+    pIns->nBlob = 1;
+    pIns->aBlob = (u8*)&emptyObject[zTail[0]=='.'];
+    pIns->eEdit = pParse->eEdit;
+    pIns->nIns = pParse->nIns;
+    pIns->aIns = pParse->aIns;
+    rc = jsonLookupStep(pIns, 0, zTail, 0);
+    pParse->oom |= pIns->oom;
   }
+  return rc;  /* Error code only */
 }
-/* forward declaration */
-static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**);
-
 /*
-** Search along zPath to find the node specified.  Return a pointer
-** to that node, or NULL if zPath is malformed or if there is no such
-** node.
+** Search along zPath to find the Json element specified.  Return an
+** index into pParse->aBlob[] for the start of that element's value.
+**
+** If the value found by this routine is the value half of a label/value
+** pair within an object, then set pParse->iLabel to the start of the
+** corresponding label, before returning.
 **
-** If pApnd!=0, then try to append new nodes to complete zPath if it is
-** possible to do so and if no existing node corresponds to zPath.  If
-** new nodes are appended *pApnd is set to 1.
+** Return one of the JSON_LOOKUP error codes if problems are seen.
+**
+** This routine will also modify the blob.  If pParse->eEdit is one of
+** JEDIT_DEL, JEDIT_REPL, JEDIT_INS, or JEDIT_SET, then changes might be
+** made to the selected value.  If an edit is performed, then the return
+** value does not necessarily point to the selected element.  If an edit
+** is performed, the return value is only useful for detecting error
+** conditions.
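Side note on jsonCreateEditSubstructure() above: indexing emptyObject[] with the boolean zTail[0]=='.' selects the one-byte JSONB encoding of {} for object-style tails and of [] for subscript tails; a type byte whose upper nibble is zero is already a complete node with an empty payload. A minimal sketch, where the enum values are assumptions:

#include <assert.h>

enum { JSONB_ARRAY = 11, JSONB_OBJECT = 12 };   /* assumed values */
static const unsigned char emptyObject[] = { JSONB_ARRAY, JSONB_OBJECT };

int main(void){
  const char *zTail1 = ".b.c";  /* '$.a.b.c' remainder: needs a {} seed */
  const char *zTail2 = "[0]";   /* '$.a[0]'  remainder: needs a [] seed */
  assert( emptyObject[zTail1[0]=='.']==JSONB_OBJECT );
  assert( emptyObject[zTail2[0]=='.']==JSONB_ARRAY );
  return 0;
}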
*/ -static JsonNode *jsonLookupStep( +static u32 jsonLookupStep( JsonParse *pParse, /* The JSON to search */ - u32 iRoot, /* Begin the search at this node */ + u32 iRoot, /* Begin the search at this element of aBlob[] */ const char *zPath, /* The path to search */ - int *pApnd, /* Append nodes to complete path if not NULL */ - const char **pzErr /* Make *pzErr point to any syntax error in zPath */ + u32 iLabel /* Label if iRoot is a value of in an object */ ){ - u32 i, j, nKey; + u32 i, j, k, nKey, sz, n, iEnd, rc; const char *zKey; - JsonNode *pRoot; - if( pParse->oom ) return 0; - pRoot = &pParse->aNode[iRoot]; - if( pRoot->jnFlags & (JNODE_REPLACE|JNODE_REMOVE) && pParse->useMod ){ - while( (pRoot->jnFlags & JNODE_REPLACE)!=0 ){ - u32 idx = (u32)(pRoot - pParse->aNode); - i = pParse->iSubst; - while( 1 /*exit-by-break*/ ){ - assert( inNode ); - assert( pParse->aNode[i].eType==JSON_SUBST ); - assert( pParse->aNode[i].eU==4 ); - assert( pParse->aNode[i].u.iPrevaNode[i].n==idx ){ - pRoot = &pParse->aNode[i+1]; - iRoot = i+1; - break; - } - i = pParse->aNode[i].u.iPrev; + u8 x; + + if( zPath[0]==0 ){ + if( pParse->eEdit && jsonBlobMakeEditable(pParse, pParse->nIns) ){ + n = jsonbPayloadSize(pParse, iRoot, &sz); + sz += n; + if( pParse->eEdit==JEDIT_DEL ){ + if( iLabel>0 ){ + sz += iRoot - iLabel; + iRoot = iLabel; + } + jsonBlobEdit(pParse, iRoot, sz, 0, 0); + }else if( pParse->eEdit==JEDIT_INS ){ + /* Already exists, so json_insert() is a no-op */ + }else{ + /* json_set() or json_replace() */ + jsonBlobEdit(pParse, iRoot, sz, pParse->aIns, pParse->nIns); } } - if( pRoot->jnFlags & JNODE_REMOVE ){ - return 0; - } + pParse->iLabel = iLabel; + return iRoot; } - if( zPath[0]==0 ) return pRoot; if( zPath[0]=='.' ){ - if( pRoot->eType!=JSON_OBJECT ) return 0; + int rawKey = 1; + x = pParse->aBlob[iRoot]; zPath++; if( zPath[0]=='"' ){ zKey = zPath + 1; @@ -204618,315 +205892,829 @@ static JsonNode *jsonLookupStep( if( zPath[i] ){ i++; }else{ - *pzErr = zPath; - return 0; + return JSON_LOOKUP_PATHERROR; } testcase( nKey==0 ); + rawKey = memchr(zKey, '\\', nKey)==0; }else{ zKey = zPath; for(i=0; zPath[i] && zPath[i]!='.' 
&& zPath[i]!='['; i++){} nKey = i; if( nKey==0 ){ - *pzErr = zPath; - return 0; - } - } - j = 1; - for(;;){ - while( j<=pRoot->n ){ - if( jsonLabelCompare(pRoot+j, zKey, nKey) ){ - return jsonLookupStep(pParse, iRoot+j+1, &zPath[i], pApnd, pzErr); - } - j++; - j += jsonNodeSize(&pRoot[j]); + return JSON_LOOKUP_PATHERROR; + } + } + if( (x & 0x0f)!=JSONB_OBJECT ) return JSON_LOOKUP_NOTFOUND; + n = jsonbPayloadSize(pParse, iRoot, &sz); + j = iRoot + n; /* j is the index of a label */ + iEnd = j+sz; + while( jaBlob[j] & 0x0f; + if( xJSONB_TEXTRAW ) return JSON_LOOKUP_ERROR; + n = jsonbPayloadSize(pParse, j, &sz); + if( n==0 ) return JSON_LOOKUP_ERROR; + k = j+n; /* k is the index of the label text */ + if( k+sz>=iEnd ) return JSON_LOOKUP_ERROR; + zLabel = (const char*)&pParse->aBlob[k]; + rawLabel = x==JSONB_TEXT || x==JSONB_TEXTRAW; + if( jsonLabelCompare(zKey, nKey, rawKey, zLabel, sz, rawLabel) ){ + u32 v = k+sz; /* v is the index of the value */ + if( ((pParse->aBlob[v])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR; + n = jsonbPayloadSize(pParse, v, &sz); + if( n==0 || v+n+sz>iEnd ) return JSON_LOOKUP_ERROR; + assert( j>0 ); + rc = jsonLookupStep(pParse, v, &zPath[i], j); + if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot); + return rc; } - if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break; - if( pParse->useMod==0 ) break; - assert( pRoot->eU==2 ); - iRoot = pRoot->u.iAppend; - pRoot = &pParse->aNode[iRoot]; - j = 1; - } - if( pApnd ){ - u32 iStart, iLabel; - JsonNode *pNode; - assert( pParse->useMod ); - iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0); - iLabel = jsonParseAddNode(pParse, JSON_STRING, nKey, zKey); - zPath += i; - pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr); - if( pParse->oom ) return 0; - if( pNode ){ - pRoot = &pParse->aNode[iRoot]; - assert( pRoot->eU==0 ); - pRoot->u.iAppend = iStart; - pRoot->jnFlags |= JNODE_APPEND; - VVA( pRoot->eU = 2 ); - pParse->aNode[iLabel].jnFlags |= JNODE_RAW; - } - return pNode; + j = k+sz; + if( ((pParse->aBlob[j])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR; + n = jsonbPayloadSize(pParse, j, &sz); + if( n==0 ) return JSON_LOOKUP_ERROR; + j += n+sz; + } + if( j>iEnd ) return JSON_LOOKUP_ERROR; + if( pParse->eEdit>=JEDIT_INS ){ + u32 nIns; /* Total bytes to insert (label+value) */ + JsonParse v; /* BLOB encoding of the value to be inserted */ + JsonParse ix; /* Header of the label to be inserted */ + testcase( pParse->eEdit==JEDIT_INS ); + testcase( pParse->eEdit==JEDIT_SET ); + memset(&ix, 0, sizeof(ix)); + ix.db = pParse->db; + jsonBlobAppendNode(&ix, rawKey?JSONB_TEXTRAW:JSONB_TEXT5, nKey, 0); + pParse->oom |= ix.oom; + rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i]); + if( !JSON_LOOKUP_ISERROR(rc) + && jsonBlobMakeEditable(pParse, ix.nBlob+nKey+v.nBlob) + ){ + assert( !pParse->oom ); + nIns = ix.nBlob + nKey + v.nBlob; + jsonBlobEdit(pParse, j, 0, 0, nIns); + if( !pParse->oom ){ + assert( pParse->aBlob!=0 ); /* Because pParse->oom!=0 */ + assert( ix.aBlob!=0 ); /* Because pPasre->oom!=0 */ + memcpy(&pParse->aBlob[j], ix.aBlob, ix.nBlob); + k = j + ix.nBlob; + memcpy(&pParse->aBlob[k], zKey, nKey); + k += nKey; + memcpy(&pParse->aBlob[k], v.aBlob, v.nBlob); + if( ALWAYS(pParse->delta) ) jsonAfterEditSizeAdjust(pParse, iRoot); + } + } + jsonParseReset(&v); + jsonParseReset(&ix); + return rc; } }else if( zPath[0]=='[' ){ - i = 0; - j = 1; - while( sqlite3Isdigit(zPath[j]) ){ - i = i*10 + zPath[j] - '0'; - j++; + x = pParse->aBlob[iRoot] & 0x0f; + if( x!=JSONB_ARRAY ) return JSON_LOOKUP_NOTFOUND; + n = 
jsonbPayloadSize(pParse, iRoot, &sz); + k = 0; + i = 1; + while( sqlite3Isdigit(zPath[i]) ){ + k = k*10 + zPath[i] - '0'; + i++; } - if( j<2 || zPath[j]!=']' ){ + if( i<2 || zPath[i]!=']' ){ if( zPath[1]=='#' ){ - JsonNode *pBase = pRoot; - int iBase = iRoot; - if( pRoot->eType!=JSON_ARRAY ) return 0; - for(;;){ - while( j<=pBase->n ){ - if( (pBase[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ) i++; - j += jsonNodeSize(&pBase[j]); - } - if( (pBase->jnFlags & JNODE_APPEND)==0 ) break; - if( pParse->useMod==0 ) break; - assert( pBase->eU==2 ); - iBase = pBase->u.iAppend; - pBase = &pParse->aNode[iBase]; - j = 1; - } - j = 2; + k = jsonbArrayCount(pParse, iRoot); + i = 2; if( zPath[2]=='-' && sqlite3Isdigit(zPath[3]) ){ - unsigned int x = 0; - j = 3; + unsigned int nn = 0; + i = 3; do{ - x = x*10 + zPath[j] - '0'; - j++; - }while( sqlite3Isdigit(zPath[j]) ); - if( x>i ) return 0; - i -= x; + nn = nn*10 + zPath[i] - '0'; + i++; + }while( sqlite3Isdigit(zPath[i]) ); + if( nn>k ) return JSON_LOOKUP_NOTFOUND; + k -= nn; } - if( zPath[j]!=']' ){ - *pzErr = zPath; - return 0; + if( zPath[i]!=']' ){ + return JSON_LOOKUP_PATHERROR; } }else{ - *pzErr = zPath; - return 0; + return JSON_LOOKUP_PATHERROR; } } - if( pRoot->eType!=JSON_ARRAY ) return 0; - zPath += j + 1; - j = 1; - for(;;){ - while( j<=pRoot->n - && (i>0 || ((pRoot[j].jnFlags & JNODE_REMOVE)!=0 && pParse->useMod)) + j = iRoot+n; + iEnd = j+sz; + while( jdelta ) jsonAfterEditSizeAdjust(pParse, iRoot); + return rc; + } + k--; + n = jsonbPayloadSize(pParse, j, &sz); + if( n==0 ) return JSON_LOOKUP_ERROR; + j += n+sz; + } + if( j>iEnd ) return JSON_LOOKUP_ERROR; + if( k>0 ) return JSON_LOOKUP_NOTFOUND; + if( pParse->eEdit>=JEDIT_INS ){ + JsonParse v; + testcase( pParse->eEdit==JEDIT_INS ); + testcase( pParse->eEdit==JEDIT_SET ); + rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i+1]); + if( !JSON_LOOKUP_ISERROR(rc) + && jsonBlobMakeEditable(pParse, v.nBlob) ){ - if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ) i--; - j += jsonNodeSize(&pRoot[j]); - } - if( i==0 && j<=pRoot->n ) break; - if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break; - if( pParse->useMod==0 ) break; - assert( pRoot->eU==2 ); - iRoot = pRoot->u.iAppend; - pRoot = &pParse->aNode[iRoot]; - j = 1; - } - if( j<=pRoot->n ){ - return jsonLookupStep(pParse, iRoot+j, zPath, pApnd, pzErr); - } - if( i==0 && pApnd ){ - u32 iStart; - JsonNode *pNode; - assert( pParse->useMod ); - iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0); - pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr); - if( pParse->oom ) return 0; - if( pNode ){ - pRoot = &pParse->aNode[iRoot]; - assert( pRoot->eU==0 ); - pRoot->u.iAppend = iStart; - pRoot->jnFlags |= JNODE_APPEND; - VVA( pRoot->eU = 2 ); + assert( !pParse->oom ); + jsonBlobEdit(pParse, j, 0, v.aBlob, v.nBlob); } - return pNode; + jsonParseReset(&v); + if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot); + return rc; } }else{ - *pzErr = zPath; + return JSON_LOOKUP_PATHERROR; } - return 0; + return JSON_LOOKUP_NOTFOUND; } /* -** Append content to pParse that will complete zPath. Return a pointer -** to the inserted node, or return NULL if the append fails. +** Convert a JSON BLOB into text and make that text the return value +** of an SQL function. 
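The array-subscript hunk above resolves '$[#]' to the element count via jsonbArrayCount() and '$[#-n]' to count-minus-n with an out-of-range check. A hypothetical reduction of just that index arithmetic, separated from the blob scan:

#include <assert.h>
#include <ctype.h>

/* Given the element count k of an array, resolve a "[#...]" path token
** to a zero-based index; -1 means not found, -2 means path syntax error. */
static int resolveHashIndex(const char *zPath, unsigned k){
  unsigned i = 2, nn = 0;        /* zPath[0]=='[' and zPath[1]=='#' */
  if( zPath[2]=='-' && isdigit((unsigned char)zPath[3]) ){
    i = 3;
    do{ nn = nn*10 + zPath[i++] - '0'; }while( isdigit((unsigned char)zPath[i]) );
    if( nn>k ) return -1;        /* further back than the array is long */
    k -= nn;
  }
  return zPath[i]==']' ? (int)k : -2;
}

int main(void){
  assert( resolveHashIndex("[#]", 4)==4 );    /* one past the end: append slot */
  assert( resolveHashIndex("[#-1]", 4)==3 );  /* last element */
  assert( resolveHashIndex("[#-5]", 4)==-1 ); /* out of range */
  return 0;
}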
*/ -static JsonNode *jsonLookupAppend( - JsonParse *pParse, /* Append content to the JSON parse */ - const char *zPath, /* Description of content to append */ - int *pApnd, /* Set this flag to 1 */ - const char **pzErr /* Make this point to any syntax error */ +static void jsonReturnTextJsonFromBlob( + sqlite3_context *ctx, + const u8 *aBlob, + u32 nBlob ){ - *pApnd = 1; - if( zPath[0]==0 ){ - jsonParseAddNode(pParse, JSON_NULL, 0, 0); - return pParse->oom ? 0 : &pParse->aNode[pParse->nNode-1]; - } - if( zPath[0]=='.' ){ - jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); - }else if( strncmp(zPath,"[0]",3)==0 ){ - jsonParseAddNode(pParse, JSON_ARRAY, 0, 0); - }else{ - return 0; - } - if( pParse->oom ) return 0; - return jsonLookupStep(pParse, pParse->nNode-1, zPath, pApnd, pzErr); + JsonParse x; + JsonString s; + + if( NEVER(aBlob==0) ) return; + memset(&x, 0, sizeof(x)); + x.aBlob = (u8*)aBlob; + x.nBlob = nBlob; + jsonStringInit(&s, ctx); + jsonTranslateBlobToText(&x, 0, &s); + jsonReturnString(&s, 0, 0); } + /* -** Return the text of a syntax error message on a JSON path. Space is -** obtained from sqlite3_malloc(). +** Return the value of the BLOB node at index i. +** +** If the value is a primitive, return it as an SQL value. +** If the value is an array or object, return it as either +** JSON text or the BLOB encoding, depending on the JSON_B flag +** on the userdata. */ -static char *jsonPathSyntaxError(const char *zErr){ - return sqlite3_mprintf("JSON path error near '%q'", zErr); +static void jsonReturnFromBlob( + JsonParse *pParse, /* Complete JSON parse tree */ + u32 i, /* Index of the node */ + sqlite3_context *pCtx, /* Return value for this function */ + int textOnly /* return text JSON. Disregard user-data */ +){ + u32 n, sz; + int rc; + sqlite3 *db = sqlite3_context_db_handle(pCtx); + + n = jsonbPayloadSize(pParse, i, &sz); + if( n==0 ){ + sqlite3_result_error(pCtx, "malformed JSON", -1); + return; + } + switch( pParse->aBlob[i] & 0x0f ){ + case JSONB_NULL: { + if( sz ) goto returnfromblob_malformed; + sqlite3_result_null(pCtx); + break; + } + case JSONB_TRUE: { + if( sz ) goto returnfromblob_malformed; + sqlite3_result_int(pCtx, 1); + break; + } + case JSONB_FALSE: { + if( sz ) goto returnfromblob_malformed; + sqlite3_result_int(pCtx, 0); + break; + } + case JSONB_INT5: + case JSONB_INT: { + sqlite3_int64 iRes = 0; + char *z; + int bNeg = 0; + char x; + if( sz==0 ) goto returnfromblob_malformed; + x = (char)pParse->aBlob[i+n]; + if( x=='-' ){ + if( sz<2 ) goto returnfromblob_malformed; + n++; + sz--; + bNeg = 1; + } + z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz); + if( z==0 ) goto returnfromblob_oom; + rc = sqlite3DecOrHexToI64(z, &iRes); + sqlite3DbFree(db, z); + if( rc==0 ){ + sqlite3_result_int64(pCtx, bNeg ? 
-iRes : iRes); + }else if( rc==3 && bNeg ){ + sqlite3_result_int64(pCtx, SMALLEST_INT64); + }else if( rc==1 ){ + goto returnfromblob_malformed; + }else{ + if( bNeg ){ n--; sz++; } + goto to_double; + } + break; + } + case JSONB_FLOAT5: + case JSONB_FLOAT: { + double r; + char *z; + if( sz==0 ) goto returnfromblob_malformed; + to_double: + z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz); + if( z==0 ) goto returnfromblob_oom; + rc = sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8); + sqlite3DbFree(db, z); + if( rc<=0 ) goto returnfromblob_malformed; + sqlite3_result_double(pCtx, r); + break; + } + case JSONB_TEXTRAW: + case JSONB_TEXT: { + sqlite3_result_text(pCtx, (char*)&pParse->aBlob[i+n], sz, + SQLITE_TRANSIENT); + break; + } + case JSONB_TEXT5: + case JSONB_TEXTJ: { + /* Translate JSON formatted string into raw text */ + u32 iIn, iOut; + const char *z; + char *zOut; + u32 nOut = sz; + z = (const char*)&pParse->aBlob[i+n]; + zOut = sqlite3DbMallocRaw(db, nOut+1); + if( zOut==0 ) goto returnfromblob_oom; + for(iIn=iOut=0; iIn=2 ); + zOut[iOut++] = (char)(0xc0 | (v>>6)); + zOut[iOut++] = 0x80 | (v&0x3f); + }else if( v<0x10000 ){ + assert( szEscape>=3 ); + zOut[iOut++] = 0xe0 | (v>>12); + zOut[iOut++] = 0x80 | ((v>>6)&0x3f); + zOut[iOut++] = 0x80 | (v&0x3f); + }else if( v==JSON_INVALID_CHAR ){ + /* Silently ignore illegal unicode */ + }else{ + assert( szEscape>=4 ); + zOut[iOut++] = 0xf0 | (v>>18); + zOut[iOut++] = 0x80 | ((v>>12)&0x3f); + zOut[iOut++] = 0x80 | ((v>>6)&0x3f); + zOut[iOut++] = 0x80 | (v&0x3f); + } + iIn += szEscape - 1; + }else{ + zOut[iOut++] = c; + } + } /* end for() */ + assert( iOut<=nOut ); + zOut[iOut] = 0; + sqlite3_result_text(pCtx, zOut, iOut, SQLITE_DYNAMIC); + break; + } + case JSONB_ARRAY: + case JSONB_OBJECT: { + int flags = textOnly ? 0 : SQLITE_PTR_TO_INT(sqlite3_user_data(pCtx)); + if( flags & JSON_BLOB ){ + sqlite3_result_blob(pCtx, &pParse->aBlob[i], sz+n, SQLITE_TRANSIENT); + }else{ + jsonReturnTextJsonFromBlob(pCtx, &pParse->aBlob[i], sz+n); + } + break; + } + default: { + goto returnfromblob_malformed; + } + } + return; + +returnfromblob_oom: + sqlite3_result_error_nomem(pCtx); + return; + +returnfromblob_malformed: + sqlite3_result_error(pCtx, "malformed JSON", -1); + return; } /* -** Do a node lookup using zPath. Return a pointer to the node on success. -** Return NULL if not found or if there is an error. +** pArg is a function argument that might be an SQL value or a JSON +** value. Figure out what it is and encode it as a JSONB blob. +** Return the results in pParse. ** -** On an error, write an error message into pCtx and increment the -** pParse->nErr counter. +** pParse is uninitialized upon entry. This routine will handle the +** initialization of pParse. The result will be contained in +** pParse->aBlob and pParse->nBlob. pParse->aBlob might be dynamically +** allocated (if pParse->nBlobAlloc is greater than zero) in which case +** the caller is responsible for freeing the space allocated to pParse->aBlob +** when it has finished with it. Or pParse->aBlob might be a static string +** or a value obtained from sqlite3_value_blob(pArg). ** -** If pApnd!=NULL then try to append missing nodes and set *pApnd = 1 if -** nodes are appended. +** If the argument is a BLOB that is clearly not a JSONB, then this +** function might set an error message in ctx and return non-zero. +** It might also set an error message and return non-zero on an OOM error. 
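The JSONB_TEXTJ/JSONB_TEXT5 branch above re-encodes each unescaped codepoint as UTF-8 in place. A standalone version of that encoder, following the same four length classes (a sketch, not part of the patch):

#include <assert.h>

/* Encode codepoint v as UTF-8 into z[]; return the number of bytes written. */
static int utf8Encode(unsigned char *z, unsigned v){
  if( v<0x80 ){ z[0] = (unsigned char)v; return 1; }
  if( v<0x800 ){
    z[0] = (unsigned char)(0xc0 | (v>>6));
    z[1] = (unsigned char)(0x80 | (v&0x3f));
    return 2;
  }
  if( v<0x10000 ){
    z[0] = (unsigned char)(0xe0 | (v>>12));
    z[1] = (unsigned char)(0x80 | ((v>>6)&0x3f));
    z[2] = (unsigned char)(0x80 | (v&0x3f));
    return 3;
  }
  z[0] = (unsigned char)(0xf0 | (v>>18));
  z[1] = (unsigned char)(0x80 | ((v>>12)&0x3f));
  z[2] = (unsigned char)(0x80 | ((v>>6)&0x3f));
  z[3] = (unsigned char)(0x80 | (v&0x3f));
  return 4;
}

int main(void){
  unsigned char z[4];
  assert( utf8Encode(z, 0x20AC)==3 );               /* U+20AC EURO SIGN */
  assert( z[0]==0xe2 && z[1]==0x82 && z[2]==0xac );
  return 0;
}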
*/ -static JsonNode *jsonLookup( - JsonParse *pParse, /* The JSON to search */ - const char *zPath, /* The path to search */ - int *pApnd, /* Append nodes to complete path if not NULL */ - sqlite3_context *pCtx /* Report errors here, if not NULL */ -){ - const char *zErr = 0; - JsonNode *pNode = 0; - char *zMsg; - - if( zPath==0 ) return 0; - if( zPath[0]!='$' ){ - zErr = zPath; - goto lookup_err; +static int jsonFunctionArgToBlob( + sqlite3_context *ctx, + sqlite3_value *pArg, + JsonParse *pParse +){ + int eType = sqlite3_value_type(pArg); + static u8 aNull[] = { 0x00 }; + memset(pParse, 0, sizeof(pParse[0])); + pParse->db = sqlite3_context_db_handle(ctx); + switch( eType ){ + default: { + pParse->aBlob = aNull; + pParse->nBlob = 1; + return 0; + } + case SQLITE_BLOB: { + if( jsonFuncArgMightBeBinary(pArg) ){ + pParse->aBlob = (u8*)sqlite3_value_blob(pArg); + pParse->nBlob = sqlite3_value_bytes(pArg); + }else{ + sqlite3_result_error(ctx, "JSON cannot hold BLOB values", -1); + return 1; + } + break; + } + case SQLITE_TEXT: { + const char *zJson = (const char*)sqlite3_value_text(pArg); + int nJson = sqlite3_value_bytes(pArg); + if( zJson==0 ) return 1; + if( sqlite3_value_subtype(pArg)==JSON_SUBTYPE ){ + pParse->zJson = (char*)zJson; + pParse->nJson = nJson; + if( jsonConvertTextToBlob(pParse, ctx) ){ + sqlite3_result_error(ctx, "malformed JSON", -1); + sqlite3DbFree(pParse->db, pParse->aBlob); + memset(pParse, 0, sizeof(pParse[0])); + return 1; + } + }else{ + jsonBlobAppendNode(pParse, JSONB_TEXTRAW, nJson, zJson); + } + break; + } + case SQLITE_FLOAT: { + double r = sqlite3_value_double(pArg); + if( NEVER(sqlite3IsNaN(r)) ){ + jsonBlobAppendNode(pParse, JSONB_NULL, 0, 0); + }else{ + int n = sqlite3_value_bytes(pArg); + const char *z = (const char*)sqlite3_value_text(pArg); + if( z==0 ) return 1; + if( z[0]=='I' ){ + jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999"); + }else if( z[0]=='-' && z[1]=='I' ){ + jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999"); + }else{ + jsonBlobAppendNode(pParse, JSONB_FLOAT, n, z); + } + } + break; + } + case SQLITE_INTEGER: { + int n = sqlite3_value_bytes(pArg); + const char *z = (const char*)sqlite3_value_text(pArg); + if( z==0 ) return 1; + jsonBlobAppendNode(pParse, JSONB_INT, n, z); + break; + } + } + if( pParse->oom ){ + sqlite3_result_error_nomem(ctx); + return 1; + }else{ + return 0; } - zPath++; - pNode = jsonLookupStep(pParse, 0, zPath, pApnd, &zErr); - if( zErr==0 ) return pNode; +} -lookup_err: - pParse->nErr++; - assert( zErr!=0 && pCtx!=0 ); - zMsg = jsonPathSyntaxError(zErr); +/* +** Generate a bad path error. +** +** If ctx is not NULL then push the error message into ctx and return NULL. +** If ctx is NULL, then return the text of the error message. +*/ +static char *jsonBadPathError( + sqlite3_context *ctx, /* The function call containing the error */ + const char *zPath /* The path with the problem */ +){ + char *zMsg = sqlite3_mprintf("bad JSON path: %Q", zPath); + if( ctx==0 ) return zMsg; if( zMsg ){ - sqlite3_result_error(pCtx, zMsg, -1); + sqlite3_result_error(ctx, zMsg, -1); sqlite3_free(zMsg); }else{ - sqlite3_result_error_nomem(pCtx); + sqlite3_result_error_nomem(ctx); } return 0; } +/* argv[0] is a BLOB that seems likely to be a JSONB. Subsequent +** arguments come in parse where each pair contains a JSON path and +** content to insert or set at that patch. Do the updates +** and return the result. +** +** The specific operation is determined by eEdit, which can be one +** of JEDIT_INS, JEDIT_REPL, or JEDIT_SET. 
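One detail worth noting in jsonFunctionArgToBlob() above: SQL +/-Infinity is stored as the out-of-range literals 9e999 and -9e999, which saturate back to +/-infinity when later parsed as a double. A standalone check of that round-trip (assumes an IEEE-754 double):

#include <assert.h>
#include <math.h>
#include <stdlib.h>

int main(void){
  double r = strtod("9e999", 0);  /* overflows double: saturates to +HUGE_VAL */
  assert( isinf(r) && r>0 );
  assert( isinf(strtod("-9e999", 0)) );
  return 0;
}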
+*/ +static void jsonInsertIntoBlob( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv, + int eEdit /* JEDIT_INS, JEDIT_REPL, or JEDIT_SET */ +){ + int i; + u32 rc = 0; + const char *zPath = 0; + int flgs; + JsonParse *p; + JsonParse ax; + + assert( (argc&1)==1 ); + flgs = argc==1 ? 0 : JSON_EDITABLE; + p = jsonParseFuncArg(ctx, argv[0], flgs); + if( p==0 ) return; + for(i=1; inBlob, ax.aBlob, ax.nBlob); + } + rc = 0; + }else{ + p->eEdit = eEdit; + p->nIns = ax.nBlob; + p->aIns = ax.aBlob; + p->delta = 0; + rc = jsonLookupStep(p, 0, zPath+1, 0); + } + jsonParseReset(&ax); + if( rc==JSON_LOOKUP_NOTFOUND ) continue; + if( JSON_LOOKUP_ISERROR(rc) ) goto jsonInsertIntoBlob_patherror; + } + jsonReturnParse(ctx, p); + jsonParseFree(p); + return; + +jsonInsertIntoBlob_patherror: + jsonParseFree(p); + if( rc==JSON_LOOKUP_ERROR ){ + sqlite3_result_error(ctx, "malformed JSON", -1); + }else{ + jsonBadPathError(ctx, zPath); + } + return; +} + +/* +** If pArg is a blob that seems like a JSONB blob, then initialize +** p to point to that JSONB and return TRUE. If pArg does not seem like +** a JSONB blob, then return FALSE; +** +** This routine is only called if it is already known that pArg is a +** blob. The only open question is whether or not the blob appears +** to be a JSONB blob. +*/ +static int jsonArgIsJsonb(sqlite3_value *pArg, JsonParse *p){ + u32 n, sz = 0; + p->aBlob = (u8*)sqlite3_value_blob(pArg); + p->nBlob = (u32)sqlite3_value_bytes(pArg); + if( p->nBlob==0 ){ + p->aBlob = 0; + return 0; + } + if( NEVER(p->aBlob==0) ){ + return 0; + } + if( (p->aBlob[0] & 0x0f)<=JSONB_OBJECT + && (n = jsonbPayloadSize(p, 0, &sz))>0 + && sz+n==p->nBlob + && ((p->aBlob[0] & 0x0f)>JSONB_FALSE || sz==0) + ){ + return 1; + } + p->aBlob = 0; + p->nBlob = 0; + return 0; +} /* -** Report the wrong number of arguments for json_insert(), json_replace() -** or json_set(). +** Generate a JsonParse object, containing valid JSONB in aBlob and nBlob, +** from the SQL function argument pArg. Return a pointer to the new +** JsonParse object. +** +** Ownership of the new JsonParse object is passed to the caller. The +** caller should invoke jsonParseFree() on the return value when it +** has finished using it. +** +** If any errors are detected, an appropriate error messages is set +** using sqlite3_result_error() or the equivalent and this routine +** returns NULL. This routine also returns NULL if the pArg argument +** is an SQL NULL value, but no error message is set in that case. This +** is so that SQL functions that are given NULL arguments will return +** a NULL value. 
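jsonArgIsJsonb() above sniffs a blob without walking it: a single header at offset 0 must span exactly the whole value, and null/true/false must carry no payload. A hand-assembled example that passes the check; the TEXT type code used here is an assumption:

#include <assert.h>

int main(void){
  /* Assumed JSONB encoding of the JSON string "hi": low nibble = type
  ** TEXT (0x07), upper nibble = inline payload size 2, then the payload. */
  unsigned char aBlob[] = { 0x27, 'h', 'i' };
  unsigned x  = aBlob[0] & 0x0f;  /* type code */
  unsigned sz = aBlob[0] >> 4;    /* inline payload size (sizes 0..11) */
  unsigned n  = 1;                /* header length for inline sizes */
  assert( x==0x07 && n+sz==sizeof(aBlob) );  /* whole blob is one node */
  return 0;
}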
*/ -static void jsonWrongNumArgs( - sqlite3_context *pCtx, - const char *zFuncName +static JsonParse *jsonParseFuncArg( + sqlite3_context *ctx, + sqlite3_value *pArg, + u32 flgs ){ - char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments", - zFuncName); - sqlite3_result_error(pCtx, zMsg, -1); - sqlite3_free(zMsg); + int eType; /* Datatype of pArg */ + JsonParse *p = 0; /* Value to be returned */ + JsonParse *pFromCache = 0; /* Value taken from cache */ + sqlite3 *db; /* The database connection */ + + assert( ctx!=0 ); + eType = sqlite3_value_type(pArg); + if( eType==SQLITE_NULL ){ + return 0; + } + pFromCache = jsonCacheSearch(ctx, pArg); + if( pFromCache ){ + pFromCache->nJPRef++; + if( (flgs & JSON_EDITABLE)==0 ){ + return pFromCache; + } + } + db = sqlite3_context_db_handle(ctx); +rebuild_from_cache: + p = sqlite3DbMallocZero(db, sizeof(*p)); + if( p==0 ) goto json_pfa_oom; + memset(p, 0, sizeof(*p)); + p->db = db; + p->nJPRef = 1; + if( pFromCache!=0 ){ + u32 nBlob = pFromCache->nBlob; + p->aBlob = sqlite3DbMallocRaw(db, nBlob); + if( p->aBlob==0 ) goto json_pfa_oom; + memcpy(p->aBlob, pFromCache->aBlob, nBlob); + p->nBlobAlloc = p->nBlob = nBlob; + p->hasNonstd = pFromCache->hasNonstd; + jsonParseFree(pFromCache); + return p; + } + if( eType==SQLITE_BLOB ){ + if( jsonArgIsJsonb(pArg,p) ){ + if( (flgs & JSON_EDITABLE)!=0 && jsonBlobMakeEditable(p, 0)==0 ){ + goto json_pfa_oom; + } + return p; + } + /* If the blob is not valid JSONB, fall through into trying to cast + ** the blob into text which is then interpreted as JSON. (tag-20240123-a) + ** + ** This goes against all historical documentation about how the SQLite + ** JSON functions were suppose to work. From the beginning, blob was + ** reserved for expansion and a blob value should have raised an error. + ** But it did not, due to a bug. And many applications came to depend + ** upon this buggy behavior, espeically when using the CLI and reading + ** JSON text using readfile(), which returns a blob. For this reason + ** we will continue to support the bug moving forward. + ** See for example https://sqlite.org/forum/forumpost/012136abd5292b8d + */ + } + p->zJson = (char*)sqlite3_value_text(pArg); + p->nJson = sqlite3_value_bytes(pArg); + if( p->nJson==0 ) goto json_pfa_malformed; + if( NEVER(p->zJson==0) ) goto json_pfa_oom; + if( jsonConvertTextToBlob(p, (flgs & JSON_KEEPERROR) ? 0 : ctx) ){ + if( flgs & JSON_KEEPERROR ){ + p->nErr = 1; + return p; + }else{ + jsonParseFree(p); + return 0; + } + }else{ + int isRCStr = sqlite3ValueIsOfClass(pArg, sqlite3RCStrUnref); + int rc; + if( !isRCStr ){ + char *zNew = sqlite3RCStrNew( p->nJson ); + if( zNew==0 ) goto json_pfa_oom; + memcpy(zNew, p->zJson, p->nJson); + p->zJson = zNew; + p->zJson[p->nJson] = 0; + }else{ + sqlite3RCStrRef(p->zJson); + } + p->bJsonIsRCStr = 1; + rc = jsonCacheInsert(ctx, p); + if( rc==SQLITE_NOMEM ) goto json_pfa_oom; + if( flgs & JSON_EDITABLE ){ + pFromCache = p; + p = 0; + goto rebuild_from_cache; + } + } + return p; + +json_pfa_malformed: + if( flgs & JSON_KEEPERROR ){ + p->nErr = 1; + return p; + }else{ + jsonParseFree(p); + sqlite3_result_error(ctx, "malformed JSON", -1); + return 0; + } + +json_pfa_oom: + jsonParseFree(pFromCache); + jsonParseFree(p); + sqlite3_result_error_nomem(ctx); + return 0; } /* -** Mark all NULL entries in the Object passed in as JNODE_REMOVE. +** Make the return value of a JSON function either the raw JSONB blob +** or make it JSON text, depending on whether the JSON_BLOB flag is +** set on the function. 
*/ -static void jsonRemoveAllNulls(JsonNode *pNode){ - int i, n; - assert( pNode->eType==JSON_OBJECT ); - n = pNode->n; - for(i=2; i<=n; i += jsonNodeSize(&pNode[i])+1){ - switch( pNode[i].eType ){ - case JSON_NULL: - pNode[i].jnFlags |= JNODE_REMOVE; - break; - case JSON_OBJECT: - jsonRemoveAllNulls(&pNode[i]); - break; +static void jsonReturnParse( + sqlite3_context *ctx, + JsonParse *p +){ + int flgs; + if( p->oom ){ + sqlite3_result_error_nomem(ctx); + return; + } + flgs = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + if( flgs & JSON_BLOB ){ + if( p->nBlobAlloc>0 && !p->bReadOnly ){ + sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_DYNAMIC); + p->nBlobAlloc = 0; + }else{ + sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_TRANSIENT); } + }else{ + JsonString s; + jsonStringInit(&s, ctx); + p->delta = 0; + jsonTranslateBlobToText(p, 0, &s); + jsonReturnString(&s, p, ctx); + sqlite3_result_subtype(ctx, JSON_SUBTYPE); } } - /**************************************************************************** ** SQL functions used for testing and debugging ****************************************************************************/ #if SQLITE_DEBUG /* -** Print N node entries. -*/ -static void jsonDebugPrintNodeEntries( - JsonNode *aNode, /* First node entry to print */ - int N /* Number of node entries to print */ -){ - int i; - for(i=0; iaBlob[iStart] & 0x0f; + u32 savedNBlob = pParse->nBlob; + sqlite3_str_appendf(pOut, "%5d:%*s", iStart, nIndent, ""); + if( pParse->nBlobAlloc>pParse->nBlob ){ + pParse->nBlob = pParse->nBlobAlloc; + } + nn = n = jsonbPayloadSize(pParse, iStart, &sz); + if( nn==0 ) nn = 1; + if( sz>0 && xaBlob[iStart+i]); + } + if( n==0 ){ + sqlite3_str_appendf(pOut, " ERROR invalid node size\n"); + iStart = n==0 ? iStart+1 : iEnd; + continue; } - printf("node %4u: %-7s n=%-5d", i, zType, aNode[i].n); - if( (aNode[i].jnFlags & ~JNODE_LABEL)!=0 ){ - u8 f = aNode[i].jnFlags; - if( f & JNODE_RAW ) printf(" RAW"); - if( f & JNODE_ESCAPE ) printf(" ESCAPE"); - if( f & JNODE_REMOVE ) printf(" REMOVE"); - if( f & JNODE_REPLACE ) printf(" REPLACE"); - if( f & JNODE_APPEND ) printf(" APPEND"); - if( f & JNODE_JSON5 ) printf(" JSON5"); + pParse->nBlob = savedNBlob; + if( iStart+n+sz>iEnd ){ + iEnd = iStart+n+sz; + if( iEnd>pParse->nBlob ){ + if( pParse->nBlobAlloc>0 && iEnd>pParse->nBlobAlloc ){ + iEnd = pParse->nBlobAlloc; + }else{ + iEnd = pParse->nBlob; + } + } } - switch( aNode[i].eU ){ - case 1: printf(" zJContent=[%.*s]\n", - aNode[i].n, aNode[i].u.zJContent); break; - case 2: printf(" iAppend=%u\n", aNode[i].u.iAppend); break; - case 3: printf(" iKey=%u\n", aNode[i].u.iKey); break; - case 4: printf(" iPrev=%u\n", aNode[i].u.iPrev); break; - default: printf("\n"); + sqlite3_str_appendall(pOut," <-- "); + switch( x ){ + case JSONB_NULL: sqlite3_str_appendall(pOut,"null"); break; + case JSONB_TRUE: sqlite3_str_appendall(pOut,"true"); break; + case JSONB_FALSE: sqlite3_str_appendall(pOut,"false"); break; + case JSONB_INT: sqlite3_str_appendall(pOut,"int"); break; + case JSONB_INT5: sqlite3_str_appendall(pOut,"int5"); break; + case JSONB_FLOAT: sqlite3_str_appendall(pOut,"float"); break; + case JSONB_FLOAT5: sqlite3_str_appendall(pOut,"float5"); break; + case JSONB_TEXT: sqlite3_str_appendall(pOut,"text"); break; + case JSONB_TEXTJ: sqlite3_str_appendall(pOut,"textj"); break; + case JSONB_TEXT5: sqlite3_str_appendall(pOut,"text5"); break; + case JSONB_TEXTRAW: sqlite3_str_appendall(pOut,"textraw"); break; + case JSONB_ARRAY: { + sqlite3_str_appendf(pOut,"array, %u bytes\n", sz); + 
jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut); + showContent = 0; + break; + } + case JSONB_OBJECT: { + sqlite3_str_appendf(pOut, "object, %u bytes\n", sz); + jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut); + showContent = 0; + break; + } + default: { + sqlite3_str_appendall(pOut, "ERROR: unknown node type\n"); + showContent = 0; + break; + } } + if( showContent ){ + if( sz==0 && x<=JSONB_FALSE ){ + sqlite3_str_append(pOut, "\n", 1); + }else{ + u32 i; + sqlite3_str_appendall(pOut, ": \""); + for(i=iStart+n; iaBlob[i]; + if( c<0x20 || c>=0x7f ) c = '.'; + sqlite3_str_append(pOut, (char*)&c, 1); + } + sqlite3_str_append(pOut, "\"\n", 2); + } + } + iStart += n + sz; } } -#endif /* SQLITE_DEBUG */ - - -#if 0 /* 1 for debugging. 0 normally. Requires -DSQLITE_DEBUG too */ -static void jsonDebugPrintParse(JsonParse *p){ - jsonDebugPrintNodeEntries(p->aNode, p->nNode); -} -static void jsonDebugPrintNode(JsonNode *pNode){ - jsonDebugPrintNodeEntries(pNode, jsonNodeSize(pNode)); +static void jsonShowParse(JsonParse *pParse){ + sqlite3_str out; + char zBuf[1000]; + if( pParse==0 ){ + printf("NULL pointer\n"); + return; + }else{ + printf("nBlobAlloc = %u\n", pParse->nBlobAlloc); + printf("nBlob = %u\n", pParse->nBlob); + printf("delta = %d\n", pParse->delta); + if( pParse->nBlob==0 ) return; + printf("content (bytes 0..%u):\n", pParse->nBlob-1); + } + sqlite3StrAccumInit(&out, 0, zBuf, sizeof(zBuf), 1000000); + jsonDebugPrintBlob(pParse, 0, pParse->nBlob, 0, &out); + printf("%s", sqlite3_str_value(&out)); + sqlite3_str_reset(&out); } -#else - /* The usual case */ -# define jsonDebugPrintNode(X) -# define jsonDebugPrintParse(X) -#endif +#endif /* SQLITE_DEBUG */ #ifdef SQLITE_DEBUG /* ** SQL function: json_parse(JSON) ** -** Parse JSON using jsonParseCached(). Then print a dump of that -** parse on standard output. Return the mimified JSON result, just -** like the json() function. +** Parse JSON using jsonParseFuncArg(). Return text that is a +** human-readable dump of the binary JSONB for the input parameter. */ static void jsonParseFunc( sqlite3_context *ctx, @@ -204934,38 +206722,19 @@ static void jsonParseFunc( sqlite3_value **argv ){ JsonParse *p; /* The parse */ + sqlite3_str out; - assert( argc==1 ); - p = jsonParseCached(ctx, argv[0], ctx, 0); + assert( argc>=1 ); + sqlite3StrAccumInit(&out, 0, 0, 0, 1000000); + p = jsonParseFuncArg(ctx, argv[0], 0); if( p==0 ) return; - printf("nNode = %u\n", p->nNode); - printf("nAlloc = %u\n", p->nAlloc); - printf("nJson = %d\n", p->nJson); - printf("nAlt = %d\n", p->nAlt); - printf("nErr = %u\n", p->nErr); - printf("oom = %u\n", p->oom); - printf("hasNonstd = %u\n", p->hasNonstd); - printf("useMod = %u\n", p->useMod); - printf("hasMod = %u\n", p->hasMod); - printf("nJPRef = %u\n", p->nJPRef); - printf("iSubst = %u\n", p->iSubst); - printf("iHold = %u\n", p->iHold); - jsonDebugPrintNodeEntries(p->aNode, p->nNode); - jsonReturnJson(p, p->aNode, ctx, 1); -} - -/* -** The json_test1(JSON) function return true (1) if the input is JSON -** text generated by another json function. It returns (0) if the input -** is not known to be JSON. 
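The dump loop above prints payload bytes hexdump-style, with anything outside printable ASCII shown as '.'. The same idea as a standalone helper:

#include <stdio.h>

/* Print n payload bytes the way jsonDebugPrintBlob() renders them. */
static void printSanitized(const unsigned char *a, unsigned n){
  unsigned i;
  putchar('"');
  for(i=0; i<n; i++){
    unsigned char c = a[i];
    if( c<0x20 || c>=0x7f ) c = '.';
    putchar(c);
  }
  fputs("\"\n", stdout);
}

int main(void){
  const unsigned char a[] = { 'h', 'i', 0x01, 0xff };
  printSanitized(a, 4);   /* prints "hi.." */
  return 0;
}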
-*/ -static void jsonTest1Func( - sqlite3_context *ctx, - int argc, - sqlite3_value **argv -){ - UNUSED_PARAMETER(argc); - sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==JSON_SUBTYPE); + if( argc==1 ){ + jsonDebugPrintBlob(p, 0, p->nBlob, 0, &out); + sqlite3_result_text64(ctx, out.zText, out.nChar, SQLITE_DYNAMIC, SQLITE_UTF8); + }else{ + jsonShowParse(p); + } + jsonParseFree(p); } #endif /* SQLITE_DEBUG */ @@ -204974,7 +206743,7 @@ static void jsonTest1Func( ****************************************************************************/ /* -** Implementation of the json_QUOTE(VALUE) function. Return a JSON value +** Implementation of the json_quote(VALUE) function. Return a JSON value ** corresponding to the SQL value input. Mostly this means putting ** double-quotes around strings and returning the unquoted string "null" ** when given a NULL input. @@ -204987,9 +206756,9 @@ static void jsonQuoteFunc( JsonString jx; UNUSED_PARAMETER(argc); - jsonInit(&jx, ctx); - jsonAppendValue(&jx, argv[0]); - jsonResult(&jx); + jsonStringInit(&jx, ctx); + jsonAppendSqlValue(&jx, argv[0]); + jsonReturnString(&jx, 0, 0); sqlite3_result_subtype(ctx, JSON_SUBTYPE); } @@ -205006,18 +206775,17 @@ static void jsonArrayFunc( int i; JsonString jx; - jsonInit(&jx, ctx); + jsonStringInit(&jx, ctx); jsonAppendChar(&jx, '['); for(i=0; inNode ); if( argc==2 ){ const char *zPath = (const char*)sqlite3_value_text(argv[1]); - pNode = jsonLookup(p, zPath, 0, ctx); - }else{ - pNode = p->aNode; - } - if( pNode==0 ){ - return; - } - if( pNode->eType==JSON_ARRAY ){ - while( 1 /*exit-by-break*/ ){ - i = 1; - while( i<=pNode->n ){ - if( (pNode[i].jnFlags & JNODE_REMOVE)==0 ) n++; - i += jsonNodeSize(&pNode[i]); + if( zPath==0 ){ + jsonParseFree(p); + return; + } + i = jsonLookupStep(p, 0, zPath[0]=='$' ? zPath+1 : "@", 0); + if( JSON_LOOKUP_ISERROR(i) ){ + if( i==JSON_LOOKUP_NOTFOUND ){ + /* no-op */ + }else if( i==JSON_LOOKUP_PATHERROR ){ + jsonBadPathError(ctx, zPath); + }else{ + sqlite3_result_error(ctx, "malformed JSON", -1); } - if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; - if( p->useMod==0 ) break; - assert( pNode->eU==2 ); - pNode = &p->aNode[pNode->u.iAppend]; + eErr = 1; + i = 0; } + }else{ + i = 0; + } + if( (p->aBlob[i] & 0x0f)==JSONB_ARRAY ){ + cnt = jsonbArrayCount(p, i); } - sqlite3_result_int64(ctx, n); + if( !eErr ) sqlite3_result_int64(ctx, cnt); + jsonParseFree(p); } -/* -** Bit values for the flags passed into jsonExtractFunc() or -** jsonSetFunc() via the user-data value. -*/ -#define JSON_JSON 0x01 /* Result is always JSON */ -#define JSON_SQL 0x02 /* Result is always SQL */ -#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */ -#define JSON_ISSET 0x04 /* json_set(), not json_insert() */ +/* True if the string is all digits */ +static int jsonAllDigits(const char *z, int n){ + int i; + for(i=0; i2 ){ + jsonAppendChar(&jx, '['); + } + for(i=1; i and ->> operators accept abbreviated PATH arguments. This - ** is mostly for compatibility with PostgreSQL, but also for - ** convenience. - ** - ** NUMBER ==> $[NUMBER] // PG compatible - ** LABEL ==> $.LABEL // PG compatible - ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience - */ - jsonInit(&jx, ctx); - if( sqlite3Isdigit(zPath[0]) ){ - jsonAppendRawNZ(&jx, "$[", 2); - jsonAppendRaw(&jx, zPath, (int)strlen(zPath)); - jsonAppendRawNZ(&jx, "]", 2); - }else{ - jsonAppendRawNZ(&jx, "$.", 1 + (zPath[0]!='[')); - jsonAppendRaw(&jx, zPath, (int)strlen(zPath)); - jsonAppendChar(&jx, 0); - } - pNode = jx.bErr ? 
0 : jsonLookup(p, jx.zBuf, 0, ctx); - jsonReset(&jx); + const char *zPath = (const char*)sqlite3_value_text(argv[i]); + int nPath; + u32 j; + if( zPath==0 ) goto json_extract_error; + nPath = sqlite3Strlen30(zPath); + if( zPath[0]=='$' ){ + j = jsonLookupStep(p, 0, zPath+1, 0); + }else if( (flags & JSON_ABPATH) ){ + /* The -> and ->> operators accept abbreviated PATH arguments. This + ** is mostly for compatibility with PostgreSQL, but also for + ** convenience. + ** + ** NUMBER ==> $[NUMBER] // PG compatible + ** LABEL ==> $.LABEL // PG compatible + ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience + */ + jsonStringInit(&jx, ctx); + if( jsonAllDigits(zPath, nPath) ){ + jsonAppendRawNZ(&jx, "[", 1); + jsonAppendRaw(&jx, zPath, nPath); + jsonAppendRawNZ(&jx, "]", 2); + }else if( jsonAllAlphanum(zPath, nPath) ){ + jsonAppendRawNZ(&jx, ".", 1); + jsonAppendRaw(&jx, zPath, nPath); + }else if( zPath[0]=='[' && nPath>=3 && zPath[nPath-1]==']' ){ + jsonAppendRaw(&jx, zPath, nPath); }else{ - pNode = jsonLookup(p, zPath, 0, ctx); + jsonAppendRawNZ(&jx, ".\"", 2); + jsonAppendRaw(&jx, zPath, nPath); + jsonAppendRawNZ(&jx, "\"", 1); } - if( pNode ){ + jsonStringTerminate(&jx); + j = jsonLookupStep(p, 0, jx.zBuf, 0); + jsonStringReset(&jx); + }else{ + jsonBadPathError(ctx, zPath); + goto json_extract_error; + } + if( jnBlob ){ + if( argc==2 ){ if( flags & JSON_JSON ){ - jsonReturnJson(p, pNode, ctx, 0); + jsonStringInit(&jx, ctx); + jsonTranslateBlobToText(p, j, &jx); + jsonReturnString(&jx, 0, 0); + jsonStringReset(&jx); + assert( (flags & JSON_BLOB)==0 ); + sqlite3_result_subtype(ctx, JSON_SUBTYPE); }else{ - jsonReturn(p, pNode, ctx); - sqlite3_result_subtype(ctx, 0); + jsonReturnFromBlob(p, j, ctx, 0); + if( (flags & (JSON_SQL|JSON_BLOB))==0 + && (p->aBlob[j]&0x0f)>=JSONB_ARRAY + ){ + sqlite3_result_subtype(ctx, JSON_SUBTYPE); + } } + }else{ + jsonAppendSeparator(&jx); + jsonTranslateBlobToText(p, j, &jx); } - }else{ - pNode = jsonLookup(p, zPath, 0, ctx); - if( p->nErr==0 && pNode ) jsonReturn(p, pNode, ctx); - } - }else{ - /* Two or more PATH arguments results in a JSON array with each - ** element of the array being the value selected by one of the PATHs */ - int i; - jsonInit(&jx, ctx); - jsonAppendChar(&jx, '['); - for(i=1; inErr ) break; - jsonAppendSeparator(&jx); - if( pNode ){ - jsonRenderNode(p, pNode, &jx); + }else if( j==JSON_LOOKUP_NOTFOUND ){ + if( argc==2 ){ + goto json_extract_error; /* Return NULL if not found */ }else{ + jsonAppendSeparator(&jx); jsonAppendRawNZ(&jx, "null", 4); } + }else if( j==JSON_LOOKUP_ERROR ){ + sqlite3_result_error(ctx, "malformed JSON", -1); + goto json_extract_error; + }else{ + jsonBadPathError(ctx, zPath); + goto json_extract_error; } - if( i==argc ){ - jsonAppendChar(&jx, ']'); - jsonResult(&jx); + } + if( argc>2 ){ + jsonAppendChar(&jx, ']'); + jsonReturnString(&jx, 0, 0); + if( (flags & JSON_BLOB)==0 ){ sqlite3_result_subtype(ctx, JSON_SUBTYPE); } - jsonReset(&jx); } +json_extract_error: + jsonStringReset(&jx); + jsonParseFree(p); + return; } -/* This is the RFC 7396 MergePatch algorithm. 
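For context while reading the old node-based MergePatch above against the blob-based rewrite that follows, the canonical RFC 7396 behavior in one example: null deletes a member, nested objects merge recursively, and any other patch value replaces the target value.

  target:  {"a":"b", "c":{"d":"e", "f":"g"}}
  patch:   {"a":"z", "c":{"f":null}}
  result:  {"a":"z", "c":{"d":"e"}}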
-*/ -static JsonNode *jsonMergePatch( - JsonParse *pParse, /* The JSON parser that contains the TARGET */ - u32 iTarget, /* Node of the TARGET in pParse */ - JsonNode *pPatch /* The PATCH */ -){ - u32 i, j; - u32 iRoot; - JsonNode *pTarget; - if( pPatch->eType!=JSON_OBJECT ){ - return pPatch; - } - assert( iTargetnNode ); - pTarget = &pParse->aNode[iTarget]; - assert( (pPatch->jnFlags & JNODE_APPEND)==0 ); - if( pTarget->eType!=JSON_OBJECT ){ - jsonRemoveAllNulls(pPatch); - return pPatch; - } - iRoot = iTarget; - for(i=1; in; i += jsonNodeSize(&pPatch[i+1])+1){ - u32 nKey; - const char *zKey; - assert( pPatch[i].eType==JSON_STRING ); - assert( pPatch[i].jnFlags & JNODE_LABEL ); - assert( pPatch[i].eU==1 ); - nKey = pPatch[i].n; - zKey = pPatch[i].u.zJContent; - for(j=1; jn; j += jsonNodeSize(&pTarget[j+1])+1 ){ - assert( pTarget[j].eType==JSON_STRING ); - assert( pTarget[j].jnFlags & JNODE_LABEL ); - if( jsonSameLabel(&pPatch[i], &pTarget[j]) ){ - if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_REPLACE) ) break; - if( pPatch[i+1].eType==JSON_NULL ){ - pTarget[j+1].jnFlags |= JNODE_REMOVE; - }else{ - JsonNode *pNew = jsonMergePatch(pParse, iTarget+j+1, &pPatch[i+1]); - if( pNew==0 ) return 0; - if( pNew!=&pParse->aNode[iTarget+j+1] ){ - jsonParseAddSubstNode(pParse, iTarget+j+1); - jsonParseAddNodeArray(pParse, pNew, jsonNodeSize(pNew)); - } - pTarget = &pParse->aNode[iTarget]; - } - break; +/* +** Return codes for jsonMergePatch() +*/ +#define JSON_MERGE_OK 0 /* Success */ +#define JSON_MERGE_BADTARGET 1 /* Malformed TARGET blob */ +#define JSON_MERGE_BADPATCH 2 /* Malformed PATCH blob */ +#define JSON_MERGE_OOM 3 /* Out-of-memory condition */ + +/* +** RFC-7396 MergePatch for two JSONB blobs. +** +** pTarget is the target. pPatch is the patch. The target is updated +** in place. The patch is read-only. 
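+**
+** [Ed. note: example added editorially; not part of the upstream patch.
+** The SQL-level effect of the RFC-7396 merge that this routine
+** implements over JSONB is, for instance:
+**
+**     SELECT json_patch('{"a":1,"b":2}', '{"a":9,"b":null,"c":8}');
+**       -- '{"a":9,"c":8}'   ("a" replaced, "b" removed by null, "c" added)
+**
+**     SELECT json_patch('{"a":{"x":1}}', '{"a":{"y":2}}');
+**       -- '{"a":{"x":1,"y":2}}'   (nested objects merge recursively)
+** ]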
+**
+** The original RFC-7396 algorithm is this:
+**
+** define MergePatch(Target, Patch):
+**   if Patch is an Object:
+**     if Target is not an Object:
+**       Target = {} # Ignore the contents and set it to an empty Object
+**     for each Name/Value pair in Patch:
+**       if Value is null:
+**         if Name exists in Target:
+**           remove the Name/Value pair from Target
+**       else:
+**         Target[Name] = MergePatch(Target[Name], Value)
+**     return Target
+**   else:
+**     return Patch
+**
+** Here is an equivalent algorithm restructured to show the actual
+** implementation:
+**
+** 01   define MergePatch(Target, Patch):
+** 02      if Patch is not an Object:
+** 03         return Patch
+** 04      else: // if Patch is an Object
+** 05         if Target is not an Object:
+** 06            Target = {}
+** 07         for each Name/Value pair in Patch:
+** 08            if Name exists in Target:
+** 09               if Value is null:
+** 10                  remove the Name/Value pair from Target
+** 11               else
+** 12                  Target[name] = MergePatch(Target[Name], Value)
+** 13            else if Value is not NULL:
+** 14               if Value is not an Object:
+** 15                  Target[name] = Value
+** 16               else:
+** 17                  Target[name] = MergePatch('{}',value)
+** 18         return Target
+**    |
+**    ^---- Line numbers referenced in comments in the implementation
+*/
+static int jsonMergePatch(
+  JsonParse *pTarget,      /* The JSON parser that contains the TARGET */
+  u32 iTarget,             /* Index of TARGET in pTarget->aBlob[] */
+  const JsonParse *pPatch, /* The PATCH */
+  u32 iPatch               /* Index of PATCH in pPatch->aBlob[] */
+){
+  u8 x;             /* Type of a single node */
+  u32 n, sz=0;      /* Return values from jsonbPayloadSize() */
+  u32 iTCursor;     /* Cursor position while scanning the target object */
+  u32 iTStart;      /* First label in the target object */
+  u32 iTEndBE;      /* Original first byte past end of target, before edit */
+  u32 iTEnd;        /* Current first byte past end of target */
+  u8 eTLabel;       /* Node type of the target label */
+  u32 iTLabel = 0;  /* Index of the label */
+  u32 nTLabel = 0;  /* Header size in bytes for the target label */
+  u32 szTLabel = 0; /* Size of the target label payload */
+  u32 iTValue = 0;  /* Index of the target value */
+  u32 nTValue = 0;  /* Header size of the target value */
+  u32 szTValue = 0; /* Payload size for the target value */
+
+  u32 iPCursor;     /* Cursor position while scanning the patch */
+  u32 iPEnd;        /* First byte past the end of the patch */
+  u8 ePLabel;       /* Node type of the patch label */
+  u32 iPLabel;      /* Start of patch label */
+  u32 nPLabel;      /* Size of header on the patch label */
+  u32 szPLabel;     /* Payload size of the patch label */
+  u32 iPValue;      /* Start of patch value */
+  u32 nPValue;      /* Header size for the patch value */
+  u32 szPValue;     /* Payload size of the patch value */
+
+  assert( iTarget>=0 && iTarget<pTarget->nBlob );
+  assert( iPatch>=0 && iPatch<pPatch->nBlob );
+  x = pPatch->aBlob[iPatch] & 0x0f;
+  if( x!=JSONB_OBJECT ){        /* Algorithm line 02 */
+    u32 szPatch;    /* Total size of the patch, header+payload */
+    u32 szTarget;   /* Total size of the target, header+payload */
+    n = jsonbPayloadSize(pPatch, iPatch, &sz);
+    szPatch = n+sz;
+    sz = 0;
+    n = jsonbPayloadSize(pTarget, iTarget, &sz);
+    szTarget = n+sz;
+    jsonBlobEdit(pTarget, iTarget, szTarget, pPatch->aBlob+iPatch, szPatch);
+    return pTarget->oom ?
JSON_MERGE_OOM : JSON_MERGE_OK; /* Line 03 */ + } + x = pTarget->aBlob[iTarget] & 0x0f; + if( x!=JSONB_OBJECT ){ /* Algorithm line 05 */ + n = jsonbPayloadSize(pTarget, iTarget, &sz); + jsonBlobEdit(pTarget, iTarget+n, sz, 0, 0); + x = pTarget->aBlob[iTarget]; + pTarget->aBlob[iTarget] = (x & 0xf0) | JSONB_OBJECT; + } + n = jsonbPayloadSize(pPatch, iPatch, &sz); + if( NEVER(n==0) ) return JSON_MERGE_BADPATCH; + iPCursor = iPatch+n; + iPEnd = iPCursor+sz; + n = jsonbPayloadSize(pTarget, iTarget, &sz); + if( NEVER(n==0) ) return JSON_MERGE_BADTARGET; + iTStart = iTarget+n; + iTEndBE = iTStart+sz; + + while( iPCursoraBlob[iPCursor] & 0x0f; + if( ePLabelJSONB_TEXTRAW ){ + return JSON_MERGE_BADPATCH; + } + nPLabel = jsonbPayloadSize(pPatch, iPCursor, &szPLabel); + if( nPLabel==0 ) return JSON_MERGE_BADPATCH; + iPValue = iPCursor + nPLabel + szPLabel; + if( iPValue>=iPEnd ) return JSON_MERGE_BADPATCH; + nPValue = jsonbPayloadSize(pPatch, iPValue, &szPValue); + if( nPValue==0 ) return JSON_MERGE_BADPATCH; + iPCursor = iPValue + nPValue + szPValue; + if( iPCursor>iPEnd ) return JSON_MERGE_BADPATCH; + + iTCursor = iTStart; + iTEnd = iTEndBE + pTarget->delta; + while( iTCursoraBlob[iTCursor] & 0x0f; + if( eTLabelJSONB_TEXTRAW ){ + return JSON_MERGE_BADTARGET; + } + nTLabel = jsonbPayloadSize(pTarget, iTCursor, &szTLabel); + if( nTLabel==0 ) return JSON_MERGE_BADTARGET; + iTValue = iTLabel + nTLabel + szTLabel; + if( iTValue>=iTEnd ) return JSON_MERGE_BADTARGET; + nTValue = jsonbPayloadSize(pTarget, iTValue, &szTValue); + if( nTValue==0 ) return JSON_MERGE_BADTARGET; + if( iTValue + nTValue + szTValue > iTEnd ) return JSON_MERGE_BADTARGET; + isEqual = jsonLabelCompare( + (const char*)&pPatch->aBlob[iPLabel+nPLabel], + szPLabel, + (ePLabel==JSONB_TEXT || ePLabel==JSONB_TEXTRAW), + (const char*)&pTarget->aBlob[iTLabel+nTLabel], + szTLabel, + (eTLabel==JSONB_TEXT || eTLabel==JSONB_TEXTRAW)); + if( isEqual ) break; + iTCursor = iTValue + nTValue + szTValue; + } + x = pPatch->aBlob[iPValue] & 0x0f; + if( iTCursoroom) ) return JSON_MERGE_OOM; + }else{ + /* Algorithm line 12 */ + int rc, savedDelta = pTarget->delta; + pTarget->delta = 0; + rc = jsonMergePatch(pTarget, iTValue, pPatch, iPValue); + if( rc ) return rc; + pTarget->delta += savedDelta; + } + }else if( x>0 ){ /* Algorithm line 13 */ + /* No match and patch value is not NULL */ + u32 szNew = szPLabel+nPLabel; + if( (pPatch->aBlob[iPValue] & 0x0f)!=JSONB_OBJECT ){ /* Line 14 */ + jsonBlobEdit(pTarget, iTEnd, 0, 0, szPValue+nPValue+szNew); + if( pTarget->oom ) return JSON_MERGE_OOM; + memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew); + memcpy(&pTarget->aBlob[iTEnd+szNew], + &pPatch->aBlob[iPValue], szPValue+nPValue); + }else{ + int rc, savedDelta; + jsonBlobEdit(pTarget, iTEnd, 0, 0, szNew+1); + if( pTarget->oom ) return JSON_MERGE_OOM; + memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew); + pTarget->aBlob[iTEnd+szNew] = 0x00; + savedDelta = pTarget->delta; + pTarget->delta = 0; + rc = jsonMergePatch(pTarget, iTEnd+szNew,pPatch,iPValue); + if( rc ) return rc; + pTarget->delta += savedDelta; } } - if( j>=pTarget->n && pPatch[i+1].eType!=JSON_NULL ){ - int iStart; - JsonNode *pApnd; - u32 nApnd; - iStart = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); - jsonParseAddNode(pParse, JSON_STRING, nKey, zKey); - pApnd = &pPatch[i+1]; - if( pApnd->eType==JSON_OBJECT ) jsonRemoveAllNulls(pApnd); - nApnd = jsonNodeSize(pApnd); - jsonParseAddNodeArray(pParse, pApnd, jsonNodeSize(pApnd)); - if( pParse->oom ) return 0; - 
pParse->aNode[iStart].n = 1+nApnd; - pParse->aNode[iRoot].jnFlags |= JNODE_APPEND; - pParse->aNode[iRoot].u.iAppend = iStart; - VVA( pParse->aNode[iRoot].eU = 2 ); - iRoot = iStart; - pTarget = &pParse->aNode[iTarget]; - } } - return pTarget; + if( pTarget->delta ) jsonAfterEditSizeAdjust(pTarget, iTarget); + return pTarget->oom ? JSON_MERGE_OOM : JSON_MERGE_OK; } + /* ** Implementation of the json_mergepatch(JSON1,JSON2) function. Return a JSON ** object that is the result of running the RFC 7396 MergePatch() algorithm @@ -205253,28 +207183,27 @@ static void jsonPatchFunc( int argc, sqlite3_value **argv ){ - JsonParse *pX; /* The JSON that is being patched */ - JsonParse *pY; /* The patch */ - JsonNode *pResult; /* The result of the merge */ + JsonParse *pTarget; /* The TARGET */ + JsonParse *pPatch; /* The PATCH */ + int rc; /* Result code */ UNUSED_PARAMETER(argc); - pX = jsonParseCached(ctx, argv[0], ctx, 1); - if( pX==0 ) return; - assert( pX->hasMod==0 ); - pX->hasMod = 1; - pY = jsonParseCached(ctx, argv[1], ctx, 1); - if( pY==0 ) return; - pX->useMod = 1; - pY->useMod = 1; - pResult = jsonMergePatch(pX, 0, pY->aNode); - assert( pResult!=0 || pX->oom ); - if( pResult && pX->oom==0 ){ - jsonDebugPrintParse(pX); - jsonDebugPrintNode(pResult); - jsonReturnJson(pX, pResult, ctx, 0); - }else{ - sqlite3_result_error_nomem(ctx); + assert( argc==2 ); + pTarget = jsonParseFuncArg(ctx, argv[0], JSON_EDITABLE); + if( pTarget==0 ) return; + pPatch = jsonParseFuncArg(ctx, argv[1], 0); + if( pPatch ){ + rc = jsonMergePatch(pTarget, 0, pPatch, 0); + if( rc==JSON_MERGE_OK ){ + jsonReturnParse(ctx, pTarget); + }else if( rc==JSON_MERGE_OOM ){ + sqlite3_result_error_nomem(ctx); + }else{ + sqlite3_result_error(ctx, "malformed JSON", -1); + } + jsonParseFree(pPatch); } + jsonParseFree(pTarget); } @@ -205298,23 +207227,23 @@ static void jsonObjectFunc( "of arguments", -1); return; } - jsonInit(&jx, ctx); + jsonStringInit(&jx, ctx); jsonAppendChar(&jx, '{'); for(i=0; i1); - if( pParse==0 ) return; - for(i=1; i<(u32)argc; i++){ + p = jsonParseFuncArg(ctx, argv[0], argc>1 ? JSON_EDITABLE : 0); + if( p==0 ) return; + for(i=1; inErr ) goto remove_done; - if( pNode ){ - pNode->jnFlags |= JNODE_REMOVE; - pParse->hasMod = 1; - pParse->useMod = 1; - } - } - if( (pParse->aNode[0].jnFlags & JNODE_REMOVE)==0 ){ - jsonReturnJson(pParse, pParse->aNode, ctx, 1); - } -remove_done: - jsonDebugPrintParse(p); -} - -/* -** Substitute the value at iNode with the pValue parameter. 
-*/ -static void jsonReplaceNode( - sqlite3_context *pCtx, - JsonParse *p, - int iNode, - sqlite3_value *pValue -){ - int idx = jsonParseAddSubstNode(p, iNode); - if( idx<=0 ){ - assert( p->oom ); - return; - } - switch( sqlite3_value_type(pValue) ){ - case SQLITE_NULL: { - jsonParseAddNode(p, JSON_NULL, 0, 0); - break; - } - case SQLITE_FLOAT: { - char *z = sqlite3_mprintf("%!0.15g", sqlite3_value_double(pValue)); - int n; - if( z==0 ){ - p->oom = 1; - break; - } - n = sqlite3Strlen30(z); - jsonParseAddNode(p, JSON_REAL, n, z); - jsonParseAddCleanup(p, sqlite3_free, z); - break; - } - case SQLITE_INTEGER: { - char *z = sqlite3_mprintf("%lld", sqlite3_value_int64(pValue)); - int n; - if( z==0 ){ - p->oom = 1; - break; - } - n = sqlite3Strlen30(z); - jsonParseAddNode(p, JSON_INT, n, z); - jsonParseAddCleanup(p, sqlite3_free, z); - - break; - } - case SQLITE_TEXT: { - const char *z = (const char*)sqlite3_value_text(pValue); - u32 n = (u32)sqlite3_value_bytes(pValue); - if( z==0 ){ - p->oom = 1; - break; - } - if( sqlite3_value_subtype(pValue)!=JSON_SUBTYPE ){ - char *zCopy = sqlite3_malloc64( n+1 ); - int k; - if( zCopy ){ - memcpy(zCopy, z, n); - zCopy[n] = 0; - jsonParseAddCleanup(p, sqlite3_free, zCopy); - }else{ - p->oom = 1; - sqlite3_result_error_nomem(pCtx); - } - k = jsonParseAddNode(p, JSON_STRING, n, zCopy); - assert( k>0 || p->oom ); - if( p->oom==0 ) p->aNode[k].jnFlags |= JNODE_RAW; + if( zPath==0 ){ + goto json_remove_done; + } + if( zPath[0]!='$' ){ + goto json_remove_patherror; + } + if( zPath[1]==0 ){ + /* json_remove(j,'$') returns NULL */ + goto json_remove_done; + } + p->eEdit = JEDIT_DEL; + p->delta = 0; + rc = jsonLookupStep(p, 0, zPath+1, 0); + if( JSON_LOOKUP_ISERROR(rc) ){ + if( rc==JSON_LOOKUP_NOTFOUND ){ + continue; /* No-op */ + }else if( rc==JSON_LOOKUP_PATHERROR ){ + jsonBadPathError(ctx, zPath); }else{ - JsonParse *pPatch = jsonParseCached(pCtx, pValue, pCtx, 1); - if( pPatch==0 ){ - p->oom = 1; - break; - } - jsonParseAddNodeArray(p, pPatch->aNode, pPatch->nNode); - /* The nodes copied out of pPatch and into p likely contain - ** u.zJContent pointers into pPatch->zJson. So preserve the - ** content of pPatch until p is destroyed. 
*/ - assert( pPatch->nJPRef>=1 ); - pPatch->nJPRef++; - jsonParseAddCleanup(p, (void(*)(void*))jsonParseFree, pPatch); + sqlite3_result_error(ctx, "malformed JSON", -1); } - break; - } - default: { - jsonParseAddNode(p, JSON_NULL, 0, 0); - sqlite3_result_error(pCtx, "JSON cannot hold BLOB values", -1); - p->nErr++; - break; + goto json_remove_done; } } + jsonReturnParse(ctx, p); + jsonParseFree(p); + return; + +json_remove_patherror: + jsonBadPathError(ctx, zPath); + +json_remove_done: + jsonParseFree(p); + return; } /* @@ -205457,32 +207316,12 @@ static void jsonReplaceFunc( int argc, sqlite3_value **argv ){ - JsonParse *pParse; /* The parse */ - JsonNode *pNode; - const char *zPath; - u32 i; - if( argc<1 ) return; if( (argc&1)==0 ) { jsonWrongNumArgs(ctx, "replace"); return; } - pParse = jsonParseCached(ctx, argv[0], ctx, argc>1); - if( pParse==0 ) return; - pParse->nJPRef++; - for(i=1; i<(u32)argc; i+=2){ - zPath = (const char*)sqlite3_value_text(argv[i]); - pParse->useMod = 1; - pNode = jsonLookup(pParse, zPath, 0, ctx); - if( pParse->nErr ) goto replace_err; - if( pNode ){ - jsonReplaceNode(ctx, pParse, (u32)(pNode - pParse->aNode), argv[i+1]); - } - } - jsonReturnJson(pParse, pParse->aNode, ctx, 1); -replace_err: - jsonDebugPrintParse(pParse); - jsonParseFree(pParse); + jsonInsertIntoBlob(ctx, argc, argv, JEDIT_REPL); } @@ -205503,39 +207342,16 @@ static void jsonSetFunc( int argc, sqlite3_value **argv ){ - JsonParse *pParse; /* The parse */ - JsonNode *pNode; - const char *zPath; - u32 i; - int bApnd; - int bIsSet = sqlite3_user_data(ctx)!=0; + + int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + int bIsSet = (flags&JSON_ISSET)!=0; if( argc<1 ) return; if( (argc&1)==0 ) { jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert"); return; } - pParse = jsonParseCached(ctx, argv[0], ctx, argc>1); - if( pParse==0 ) return; - pParse->nJPRef++; - for(i=1; i<(u32)argc; i+=2){ - zPath = (const char*)sqlite3_value_text(argv[i]); - bApnd = 0; - pParse->useMod = 1; - pNode = jsonLookup(pParse, zPath, &bApnd, ctx); - if( pParse->oom ){ - sqlite3_result_error_nomem(ctx); - goto jsonSetDone; - }else if( pParse->nErr ){ - goto jsonSetDone; - }else if( pNode && (bApnd || bIsSet) ){ - jsonReplaceNode(ctx, pParse, (u32)(pNode - pParse->aNode), argv[i+1]); - } - } - jsonDebugPrintParse(pParse); - jsonReturnJson(pParse, pParse->aNode, ctx, 1); -jsonSetDone: - jsonParseFree(pParse); + jsonInsertIntoBlob(ctx, argc, argv, bIsSet ? 
JEDIT_SET : JEDIT_INS); } /* @@ -205551,27 +207367,93 @@ static void jsonTypeFunc( sqlite3_value **argv ){ JsonParse *p; /* The parse */ - const char *zPath; - JsonNode *pNode; + const char *zPath = 0; + u32 i; - p = jsonParseCached(ctx, argv[0], ctx, 0); + p = jsonParseFuncArg(ctx, argv[0], 0); if( p==0 ) return; if( argc==2 ){ zPath = (const char*)sqlite3_value_text(argv[1]); - pNode = jsonLookup(p, zPath, 0, ctx); + if( zPath==0 ) goto json_type_done; + if( zPath[0]!='$' ){ + jsonBadPathError(ctx, zPath); + goto json_type_done; + } + i = jsonLookupStep(p, 0, zPath+1, 0); + if( JSON_LOOKUP_ISERROR(i) ){ + if( i==JSON_LOOKUP_NOTFOUND ){ + /* no-op */ + }else if( i==JSON_LOOKUP_PATHERROR ){ + jsonBadPathError(ctx, zPath); + }else{ + sqlite3_result_error(ctx, "malformed JSON", -1); + } + goto json_type_done; + } }else{ - pNode = p->aNode; - } - if( pNode ){ - sqlite3_result_text(ctx, jsonType[pNode->eType], -1, SQLITE_STATIC); + i = 0; } + sqlite3_result_text(ctx, jsonbType[p->aBlob[i]&0x0f], -1, SQLITE_STATIC); +json_type_done: + jsonParseFree(p); } /* ** json_valid(JSON) -** -** Return 1 if JSON is a well-formed canonical JSON string according -** to RFC-7159. Return 0 otherwise. +** json_valid(JSON, FLAGS) +** +** Check the JSON argument to see if it is well-formed. The FLAGS argument +** encodes the various constraints on what is meant by "well-formed": +** +** 0x01 Canonical RFC-8259 JSON text +** 0x02 JSON text with optional JSON-5 extensions +** 0x04 Superficially appears to be JSONB +** 0x08 Strictly well-formed JSONB +** +** If the FLAGS argument is omitted, it defaults to 1. Useful values for +** FLAGS include: +** +** 1 Strict canonical JSON text +** 2 JSON text perhaps with JSON-5 extensions +** 4 Superficially appears to be JSONB +** 5 Canonical JSON text or superficial JSONB +** 6 JSON-5 text or superficial JSONB +** 8 Strict JSONB +** 9 Canonical JSON text or strict JSONB +** 10 JSON-5 text or strict JSONB +** +** Other flag combinations are redundant. For example, every canonical +** JSON text is also well-formed JSON-5 text, so FLAG values 2 and 3 +** are the same. Similarly, any input that passes a strict JSONB validation +** will also pass the superficial validation so 12 through 15 are the same +** as 8 through 11 respectively. +** +** This routine runs in linear time to validate text and when doing strict +** JSONB validation. Superficial JSONB validation is constant time, +** assuming the BLOB is already in memory. The performance advantage +** of superficial JSONB validation is why that option is provided. +** Application developers can choose to do fast superficial validation or +** slower strict validation, according to their specific needs. +** +** Only the lower four bits of the FLAGS argument are currently used. +** Higher bits are reserved for future expansion. To facilitate +** compatibility, the current implementation raises an error if any bit +** in FLAGS is set other than the lower four bits. +** +** The original circa 2015 implementation of the JSON routines in +** SQLite only supported canonical RFC-8259 JSON text and the json_valid() +** function only accepted one argument. That is why the default value +** for the FLAGS argument is 1, since FLAGS=1 causes this routine to only +** recognize canonical RFC-8259 JSON text as valid. The extra FLAGS +** argument was added when the JSON routines were extended to support +** JSON5-like extensions and binary JSONB stored in BLOBs. 
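+**
+** [Ed. note: examples added editorially; not part of the upstream patch.
+** Applying the flag definitions above to text inputs:
+**
+**     json_valid('{"a":1}')      ->  1    canonical RFC-8259 text
+**     json_valid('{a:1}')        ->  0    JSON-5 shorthand, not canonical
+**     json_valid('{a:1}', 2)     ->  1    JSON-5 extensions allowed
+**     json_valid('{a:1', 6)      ->  0    malformed under every text flag
+** ]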
+** +** Return Values: +** +** * Raise an error if FLAGS is outside the range of 1 to 15. +** * Return NULL if the input is NULL +** * Return 1 if the input is well-formed. +** * Return 0 if the input is not well-formed. */ static void jsonValidFunc( sqlite3_context *ctx, @@ -205579,79 +207461,128 @@ static void jsonValidFunc( sqlite3_value **argv ){ JsonParse *p; /* The parse */ - UNUSED_PARAMETER(argc); - if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ + u8 flags = 1; + u8 res = 0; + if( argc==2 ){ + i64 f = sqlite3_value_int64(argv[1]); + if( f<1 || f>15 ){ + sqlite3_result_error(ctx, "FLAGS parameter to json_valid() must be" + " between 1 and 15", -1); + return; + } + flags = f & 0x0f; + } + switch( sqlite3_value_type(argv[0]) ){ + case SQLITE_NULL: { #ifdef SQLITE_LEGACY_JSON_VALID - /* Incorrect legacy behavior was to return FALSE for a NULL input */ - sqlite3_result_int(ctx, 0); + /* Incorrect legacy behavior was to return FALSE for a NULL input */ + sqlite3_result_int(ctx, 0); #endif - return; - } - p = jsonParseCached(ctx, argv[0], 0, 0); - if( p==0 || p->oom ){ - sqlite3_result_error_nomem(ctx); - sqlite3_free(p); - }else{ - sqlite3_result_int(ctx, p->nErr==0 && (p->hasNonstd==0 || p->useMod)); - if( p->nErr ) jsonParseFree(p); + return; + } + case SQLITE_BLOB: { + if( jsonFuncArgMightBeBinary(argv[0]) ){ + if( flags & 0x04 ){ + /* Superficial checking only - accomplished by the + ** jsonFuncArgMightBeBinary() call above. */ + res = 1; + }else if( flags & 0x08 ){ + /* Strict checking. Check by translating BLOB->TEXT->BLOB. If + ** no errors occur, call that a "strict check". */ + JsonParse px; + u32 iErr; + memset(&px, 0, sizeof(px)); + px.aBlob = (u8*)sqlite3_value_blob(argv[0]); + px.nBlob = sqlite3_value_bytes(argv[0]); + iErr = jsonbValidityCheck(&px, 0, px.nBlob, 1); + res = iErr==0; + } + break; + } + /* Fall through into interpreting the input as text. See note + ** above at tag-20240123-a. */ + /* no break */ deliberate_fall_through + } + default: { + JsonParse px; + if( (flags & 0x3)==0 ) break; + memset(&px, 0, sizeof(px)); + + p = jsonParseFuncArg(ctx, argv[0], JSON_KEEPERROR); + if( p ){ + if( p->oom ){ + sqlite3_result_error_nomem(ctx); + }else if( p->nErr ){ + /* no-op */ + }else if( (flags & 0x02)!=0 || p->hasNonstd==0 ){ + res = 1; + } + jsonParseFree(p); + }else{ + sqlite3_result_error_nomem(ctx); + } + break; + } } + sqlite3_result_int(ctx, res); } /* ** json_error_position(JSON) ** -** If the argument is not an interpretable JSON string, then return the 1-based -** character position at which the parser first recognized that the input -** was in error. The left-most character is 1. If the string is valid -** JSON, then return 0. -** -** Note that json_valid() is only true for strictly conforming canonical JSON. -** But this routine returns zero if the input contains extension. Thus: +** If the argument is NULL, return NULL ** -** (1) If the input X is strictly conforming canonical JSON: +** If the argument is BLOB, do a full validity check and return non-zero +** if the check fails. The return value is the approximate 1-based offset +** to the byte of the element that contains the first error. 
** -** json_valid(X) returns true -** json_error_position(X) returns 0 -** -** (2) If the input X is JSON but it includes extension (such as JSON5) that -** are not part of RFC-8259: -** -** json_valid(X) returns false -** json_error_position(X) return 0 -** -** (3) If the input X cannot be interpreted as JSON even taking extensions -** into account: -** -** json_valid(X) return false -** json_error_position(X) returns 1 or more +** Otherwise interpret the argument is TEXT (even if it is numeric) and +** return the 1-based character position for where the parser first recognized +** that the input was not valid JSON, or return 0 if the input text looks +** ok. JSON-5 extensions are accepted. */ static void jsonErrorFunc( sqlite3_context *ctx, int argc, sqlite3_value **argv ){ - JsonParse *p; /* The parse */ + i64 iErrPos = 0; /* Error position to be returned */ + JsonParse s; + + assert( argc==1 ); UNUSED_PARAMETER(argc); - if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; - p = jsonParseCached(ctx, argv[0], 0, 0); - if( p==0 || p->oom ){ + memset(&s, 0, sizeof(s)); + s.db = sqlite3_context_db_handle(ctx); + if( jsonFuncArgMightBeBinary(argv[0]) ){ + s.aBlob = (u8*)sqlite3_value_blob(argv[0]); + s.nBlob = sqlite3_value_bytes(argv[0]); + iErrPos = (i64)jsonbValidityCheck(&s, 0, s.nBlob, 1); + }else{ + s.zJson = (char*)sqlite3_value_text(argv[0]); + if( s.zJson==0 ) return; /* NULL input or OOM */ + s.nJson = sqlite3_value_bytes(argv[0]); + if( jsonConvertTextToBlob(&s,0) ){ + if( s.oom ){ + iErrPos = -1; + }else{ + /* Convert byte-offset s.iErr into a character offset */ + u32 k; + assert( s.zJson!=0 ); /* Because s.oom is false */ + for(k=0; knErr==0 ){ - sqlite3_result_int(ctx, 0); }else{ - int n = 1; - u32 i; - const char *z = (const char*)sqlite3_value_text(argv[0]); - for(i=0; iiErr && ALWAYS(z[i]); i++){ - if( (z[i]&0xc0)!=0x80 ) n++; - } - sqlite3_result_int(ctx, n); - jsonParseFree(p); + sqlite3_result_int64(ctx, iErrPos); } } - /**************************************************************************** ** Aggregate SQL function implementations ****************************************************************************/ @@ -205670,24 +207601,34 @@ static void jsonArrayStep( pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr)); if( pStr ){ if( pStr->zBuf==0 ){ - jsonInit(pStr, ctx); + jsonStringInit(pStr, ctx); jsonAppendChar(pStr, '['); }else if( pStr->nUsed>1 ){ jsonAppendChar(pStr, ','); } pStr->pCtx = ctx; - jsonAppendValue(pStr, argv[0]); + jsonAppendSqlValue(pStr, argv[0]); } } static void jsonArrayCompute(sqlite3_context *ctx, int isFinal){ JsonString *pStr; pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0); if( pStr ){ + int flags; pStr->pCtx = ctx; jsonAppendChar(pStr, ']'); - if( pStr->bErr ){ - if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx); - assert( pStr->bStatic ); + flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + if( pStr->eErr ){ + jsonReturnString(pStr, 0, 0); + return; + }else if( flags & JSON_BLOB ){ + jsonReturnStringAsBlob(pStr); + if( isFinal ){ + if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf); + }else{ + jsonStringTrimOneChar(pStr); + } + return; }else if( isFinal ){ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, pStr->bStatic ? 
SQLITE_TRANSIENT : @@ -205695,7 +207636,7 @@ static void jsonArrayCompute(sqlite3_context *ctx, int isFinal){ pStr->bStatic = 1; }else{ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT); - pStr->nUsed--; + jsonStringTrimOneChar(pStr); } }else{ sqlite3_result_text(ctx, "[]", 2, SQLITE_STATIC); @@ -205776,27 +207717,38 @@ static void jsonObjectStep( pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr)); if( pStr ){ if( pStr->zBuf==0 ){ - jsonInit(pStr, ctx); + jsonStringInit(pStr, ctx); jsonAppendChar(pStr, '{'); }else if( pStr->nUsed>1 ){ jsonAppendChar(pStr, ','); } pStr->pCtx = ctx; z = (const char*)sqlite3_value_text(argv[0]); - n = (u32)sqlite3_value_bytes(argv[0]); + n = sqlite3Strlen30(z); jsonAppendString(pStr, z, n); jsonAppendChar(pStr, ':'); - jsonAppendValue(pStr, argv[1]); + jsonAppendSqlValue(pStr, argv[1]); } } static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){ JsonString *pStr; pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0); if( pStr ){ + int flags; jsonAppendChar(pStr, '}'); - if( pStr->bErr ){ - if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx); - assert( pStr->bStatic ); + pStr->pCtx = ctx; + flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + if( pStr->eErr ){ + jsonReturnString(pStr, 0, 0); + return; + }else if( flags & JSON_BLOB ){ + jsonReturnStringAsBlob(pStr); + if( isFinal ){ + if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf); + }else{ + jsonStringTrimOneChar(pStr); + } + return; }else if( isFinal ){ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, pStr->bStatic ? SQLITE_TRANSIENT : @@ -205804,7 +207756,7 @@ static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){ pStr->bStatic = 1; }else{ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT); - pStr->nUsed--; + jsonStringTrimOneChar(pStr); } }else{ sqlite3_result_text(ctx, "{}", 2, SQLITE_STATIC); @@ -205824,19 +207776,37 @@ static void jsonObjectFinal(sqlite3_context *ctx){ /**************************************************************************** ** The json_each virtual table ****************************************************************************/ +typedef struct JsonParent JsonParent; +struct JsonParent { + u32 iHead; /* Start of object or array */ + u32 iValue; /* Start of the value */ + u32 iEnd; /* First byte past the end */ + u32 nPath; /* Length of path */ + i64 iKey; /* Key for JSONB_ARRAY */ +}; + typedef struct JsonEachCursor JsonEachCursor; struct JsonEachCursor { sqlite3_vtab_cursor base; /* Base class - must be first */ u32 iRowid; /* The rowid */ - u32 iBegin; /* The first node of the scan */ - u32 i; /* Index in sParse.aNode[] of current row */ + u32 i; /* Index in sParse.aBlob[] of current row */ u32 iEnd; /* EOF when i equals or exceeds this value */ - u8 eType; /* Type of top-level element */ + u32 nRoot; /* Size of the root path in bytes */ + u8 eType; /* Type of the container for element i */ u8 bRecursive; /* True for json_tree(). 
False for json_each() */ - char *zJson; /* Input JSON */ - char *zRoot; /* Path by which to filter zJson */ + u32 nParent; /* Current nesting depth */ + u32 nParentAlloc; /* Space allocated for aParent[] */ + JsonParent *aParent; /* Parent elements of i */ + sqlite3 *db; /* Database connection */ + JsonString path; /* Current path */ JsonParse sParse; /* Parse of the input JSON */ }; +typedef struct JsonEachConnection JsonEachConnection; +struct JsonEachConnection { + sqlite3_vtab base; /* Base class - must be first */ + sqlite3 *db; /* Database connection */ +}; + /* Constructor for the json_each virtual table */ static int jsonEachConnect( @@ -205846,7 +207816,7 @@ static int jsonEachConnect( sqlite3_vtab **ppVtab, char **pzErr ){ - sqlite3_vtab *pNew; + JsonEachConnection *pNew; int rc; /* Column numbers */ @@ -205872,28 +207842,32 @@ static int jsonEachConnect( "CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path," "json HIDDEN,root HIDDEN)"); if( rc==SQLITE_OK ){ - pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) ); + pNew = (JsonEachConnection*)sqlite3DbMallocZero(db, sizeof(*pNew)); + *ppVtab = (sqlite3_vtab*)pNew; if( pNew==0 ) return SQLITE_NOMEM; - memset(pNew, 0, sizeof(*pNew)); sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); + pNew->db = db; } return rc; } /* destructor for json_each virtual table */ static int jsonEachDisconnect(sqlite3_vtab *pVtab){ - sqlite3_free(pVtab); + JsonEachConnection *p = (JsonEachConnection*)pVtab; + sqlite3DbFree(p->db, pVtab); return SQLITE_OK; } /* constructor for a JsonEachCursor object for json_each(). */ static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ + JsonEachConnection *pVtab = (JsonEachConnection*)p; JsonEachCursor *pCur; UNUSED_PARAMETER(p); - pCur = sqlite3_malloc( sizeof(*pCur) ); + pCur = sqlite3DbMallocZero(pVtab->db, sizeof(*pCur)); if( pCur==0 ) return SQLITE_NOMEM; - memset(pCur, 0, sizeof(*pCur)); + pCur->db = pVtab->db; + jsonStringZero(&pCur->path); *ppCursor = &pCur->base; return SQLITE_OK; } @@ -205911,21 +207885,24 @@ static int jsonEachOpenTree(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ /* Reset a JsonEachCursor back to its original state. Free any memory ** held. */ static void jsonEachCursorReset(JsonEachCursor *p){ - sqlite3_free(p->zRoot); jsonParseReset(&p->sParse); + jsonStringReset(&p->path); + sqlite3DbFree(p->db, p->aParent); p->iRowid = 0; p->i = 0; + p->aParent = 0; + p->nParent = 0; + p->nParentAlloc = 0; p->iEnd = 0; p->eType = 0; - p->zJson = 0; - p->zRoot = 0; } /* Destructor for a jsonEachCursor object */ static int jsonEachClose(sqlite3_vtab_cursor *cur){ JsonEachCursor *p = (JsonEachCursor*)cur; jsonEachCursorReset(p); - sqlite3_free(cur); + + sqlite3DbFree(p->db, cur); return SQLITE_OK; } @@ -205936,200 +207913,230 @@ static int jsonEachEof(sqlite3_vtab_cursor *cur){ return p->i >= p->iEnd; } -/* Advance the cursor to the next element for json_tree() */ -static int jsonEachNext(sqlite3_vtab_cursor *cur){ - JsonEachCursor *p = (JsonEachCursor*)cur; - if( p->bRecursive ){ - if( p->sParse.aNode[p->i].jnFlags & JNODE_LABEL ) p->i++; - p->i++; - p->iRowid++; - if( p->iiEnd ){ - u32 iUp = p->sParse.aUp[p->i]; - JsonNode *pUp = &p->sParse.aNode[iUp]; - p->eType = pUp->eType; - if( pUp->eType==JSON_ARRAY ){ - assert( pUp->eU==0 || pUp->eU==3 ); - testcase( pUp->eU==3 ); - VVA( pUp->eU = 3 ); - if( iUp==p->i-1 ){ - pUp->u.iKey = 0; - }else{ - pUp->u.iKey++; +/* +** If the cursor is currently pointing at the label of a object entry, +** then return the index of the value. 
For all other cases, return the +** current pointer position, which is the value. +*/ +static int jsonSkipLabel(JsonEachCursor *p){ + if( p->eType==JSONB_OBJECT ){ + u32 sz = 0; + u32 n = jsonbPayloadSize(&p->sParse, p->i, &sz); + return p->i + n + sz; + }else{ + return p->i; + } +} + +/* +** Append the path name for the current element. +*/ +static void jsonAppendPathName(JsonEachCursor *p){ + assert( p->nParent>0 ); + assert( p->eType==JSONB_ARRAY || p->eType==JSONB_OBJECT ); + if( p->eType==JSONB_ARRAY ){ + jsonPrintf(30, &p->path, "[%lld]", p->aParent[p->nParent-1].iKey); + }else{ + u32 n, sz = 0, k, i; + const char *z; + int needQuote = 0; + n = jsonbPayloadSize(&p->sParse, p->i, &sz); + k = p->i + n; + z = (const char*)&p->sParse.aBlob[k]; + if( sz==0 || !sqlite3Isalpha(z[0]) ){ + needQuote = 1; + }else{ + for(i=0; ieType ){ - case JSON_ARRAY: { - p->i += jsonNodeSize(&p->sParse.aNode[p->i]); - p->iRowid++; - break; - } - case JSON_OBJECT: { - p->i += 1 + jsonNodeSize(&p->sParse.aNode[p->i+1]); - p->iRowid++; - break; - } - default: { - p->i = p->iEnd; - break; - } + if( needQuote ){ + jsonPrintf(sz+4,&p->path,".\"%.*s\"", sz, z); + }else{ + jsonPrintf(sz+2,&p->path,".%.*s", sz, z); } } - return SQLITE_OK; } -/* Append an object label to the JSON Path being constructed -** in pStr. -*/ -static void jsonAppendObjectPathElement( - JsonString *pStr, - JsonNode *pNode -){ - int jj, nn; - const char *z; - assert( pNode->eType==JSON_STRING ); - assert( pNode->jnFlags & JNODE_LABEL ); - assert( pNode->eU==1 ); - z = pNode->u.zJContent; - nn = pNode->n; - if( (pNode->jnFlags & JNODE_RAW)==0 ){ - assert( nn>=2 ); - assert( z[0]=='"' || z[0]=='\'' ); - assert( z[nn-1]=='"' || z[0]=='\'' ); - if( nn>2 && sqlite3Isalpha(z[1]) ){ - for(jj=2; jjbRecursive ){ + u8 x; + u8 levelChange = 0; + u32 n, sz = 0; + u32 i = jsonSkipLabel(p); + x = p->sParse.aBlob[i] & 0x0f; + n = jsonbPayloadSize(&p->sParse, i, &sz); + if( x==JSONB_OBJECT || x==JSONB_ARRAY ){ + JsonParent *pParent; + if( p->nParent>=p->nParentAlloc ){ + JsonParent *pNew; + u64 nNew; + nNew = p->nParentAlloc*2 + 3; + pNew = sqlite3DbRealloc(p->db, p->aParent, sizeof(JsonParent)*nNew); + if( pNew==0 ) return SQLITE_NOMEM; + p->nParentAlloc = (u32)nNew; + p->aParent = pNew; + } + levelChange = 1; + pParent = &p->aParent[p->nParent]; + pParent->iHead = p->i; + pParent->iValue = i; + pParent->iEnd = i + n + sz; + pParent->iKey = -1; + pParent->nPath = (u32)p->path.nUsed; + if( p->eType && p->nParent ){ + jsonAppendPathName(p); + if( p->path.eErr ) rc = SQLITE_NOMEM; + } + p->nParent++; + p->i = i + n; + }else{ + p->i = i + n + sz; + } + while( p->nParent>0 && p->i >= p->aParent[p->nParent-1].iEnd ){ + p->nParent--; + p->path.nUsed = p->aParent[p->nParent].nPath; + levelChange = 1; + } + if( levelChange ){ + if( p->nParent>0 ){ + JsonParent *pParent = &p->aParent[p->nParent-1]; + u32 iVal = pParent->iValue; + p->eType = p->sParse.aBlob[iVal] & 0x0f; + }else{ + p->eType = 0; } } + }else{ + u32 n, sz = 0; + u32 i = jsonSkipLabel(p); + n = jsonbPayloadSize(&p->sParse, i, &sz); + p->i = i + n + sz; + } + if( p->eType==JSONB_ARRAY && p->nParent ){ + p->aParent[p->nParent-1].iKey++; } - jsonPrintf(nn+2, pStr, ".%.*s", nn, z); + p->iRowid++; + return rc; } -/* Append the name of the path for element i to pStr +/* Length of the path for rowid==0 in bRecursive mode. 
*/ -static void jsonEachComputePath( - JsonEachCursor *p, /* The cursor */ - JsonString *pStr, /* Write the path here */ - u32 i /* Path to this element */ -){ - JsonNode *pNode, *pUp; - u32 iUp; - if( i==0 ){ - jsonAppendChar(pStr, '$'); - return; - } - iUp = p->sParse.aUp[i]; - jsonEachComputePath(p, pStr, iUp); - pNode = &p->sParse.aNode[i]; - pUp = &p->sParse.aNode[iUp]; - if( pUp->eType==JSON_ARRAY ){ - assert( pUp->eU==3 || (pUp->eU==0 && pUp->u.iKey==0) ); - testcase( pUp->eU==0 ); - jsonPrintf(30, pStr, "[%d]", pUp->u.iKey); - }else{ - assert( pUp->eType==JSON_OBJECT ); - if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--; - jsonAppendObjectPathElement(pStr, pNode); +static int jsonEachPathLength(JsonEachCursor *p){ + u32 n = p->path.nUsed; + char *z = p->path.zBuf; + if( p->iRowid==0 && p->bRecursive && n>=2 ){ + while( n>1 ){ + n--; + if( z[n]=='[' || z[n]=='.' ){ + u32 x, sz = 0; + char cSaved = z[n]; + z[n] = 0; + assert( p->sParse.eEdit==0 ); + x = jsonLookupStep(&p->sParse, 0, z+1, 0); + z[n] = cSaved; + if( JSON_LOOKUP_ISERROR(x) ) continue; + if( x + jsonbPayloadSize(&p->sParse, x, &sz) == p->i ) break; + } + } } + return n; } /* Return the value of a column */ static int jsonEachColumn( sqlite3_vtab_cursor *cur, /* The cursor */ sqlite3_context *ctx, /* First argument to sqlite3_result_...() */ - int i /* Which column to return */ + int iColumn /* Which column to return */ ){ JsonEachCursor *p = (JsonEachCursor*)cur; - JsonNode *pThis = &p->sParse.aNode[p->i]; - switch( i ){ + switch( iColumn ){ case JEACH_KEY: { - if( p->i==0 ) break; - if( p->eType==JSON_OBJECT ){ - jsonReturn(&p->sParse, pThis, ctx); - }else if( p->eType==JSON_ARRAY ){ - u32 iKey; - if( p->bRecursive ){ - if( p->iRowid==0 ) break; - assert( p->sParse.aNode[p->sParse.aUp[p->i]].eU==3 ); - iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey; + if( p->nParent==0 ){ + u32 n, j; + if( p->nRoot==1 ) break; + j = jsonEachPathLength(p); + n = p->nRoot - j; + if( n==0 ){ + break; + }else if( p->path.zBuf[j]=='[' ){ + i64 x; + sqlite3Atoi64(&p->path.zBuf[j+1], &x, n-1, SQLITE_UTF8); + sqlite3_result_int64(ctx, x); + }else if( p->path.zBuf[j+1]=='"' ){ + sqlite3_result_text(ctx, &p->path.zBuf[j+2], n-3, SQLITE_TRANSIENT); }else{ - iKey = p->iRowid; + sqlite3_result_text(ctx, &p->path.zBuf[j+1], n-1, SQLITE_TRANSIENT); } - sqlite3_result_int64(ctx, (sqlite3_int64)iKey); + break; + } + if( p->eType==JSONB_OBJECT ){ + jsonReturnFromBlob(&p->sParse, p->i, ctx, 1); + }else{ + assert( p->eType==JSONB_ARRAY ); + sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iKey); } break; } case JEACH_VALUE: { - if( pThis->jnFlags & JNODE_LABEL ) pThis++; - jsonReturn(&p->sParse, pThis, ctx); + u32 i = jsonSkipLabel(p); + jsonReturnFromBlob(&p->sParse, i, ctx, 1); break; } case JEACH_TYPE: { - if( pThis->jnFlags & JNODE_LABEL ) pThis++; - sqlite3_result_text(ctx, jsonType[pThis->eType], -1, SQLITE_STATIC); + u32 i = jsonSkipLabel(p); + u8 eType = p->sParse.aBlob[i] & 0x0f; + sqlite3_result_text(ctx, jsonbType[eType], -1, SQLITE_STATIC); break; } case JEACH_ATOM: { - if( pThis->jnFlags & JNODE_LABEL ) pThis++; - if( pThis->eType>=JSON_ARRAY ) break; - jsonReturn(&p->sParse, pThis, ctx); + u32 i = jsonSkipLabel(p); + if( (p->sParse.aBlob[i] & 0x0f)sParse, i, ctx, 1); + } break; } case JEACH_ID: { - sqlite3_result_int64(ctx, - (sqlite3_int64)p->i + ((pThis->jnFlags & JNODE_LABEL)!=0)); + sqlite3_result_int64(ctx, (sqlite3_int64)p->i); break; } case JEACH_PARENT: { - if( p->i>p->iBegin && p->bRecursive ){ - sqlite3_result_int64(ctx, 
(sqlite3_int64)p->sParse.aUp[p->i]); + if( p->nParent>0 && p->bRecursive ){ + sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iHead); } break; } case JEACH_FULLKEY: { - JsonString x; - jsonInit(&x, ctx); - if( p->bRecursive ){ - jsonEachComputePath(p, &x, p->i); - }else{ - if( p->zRoot ){ - jsonAppendRaw(&x, p->zRoot, (int)strlen(p->zRoot)); - }else{ - jsonAppendChar(&x, '$'); - } - if( p->eType==JSON_ARRAY ){ - jsonPrintf(30, &x, "[%d]", p->iRowid); - }else if( p->eType==JSON_OBJECT ){ - jsonAppendObjectPathElement(&x, pThis); - } - } - jsonResult(&x); + u64 nBase = p->path.nUsed; + if( p->nParent ) jsonAppendPathName(p); + sqlite3_result_text64(ctx, p->path.zBuf, p->path.nUsed, + SQLITE_TRANSIENT, SQLITE_UTF8); + p->path.nUsed = nBase; break; } case JEACH_PATH: { - if( p->bRecursive ){ - JsonString x; - jsonInit(&x, ctx); - jsonEachComputePath(p, &x, p->sParse.aUp[p->i]); - jsonResult(&x); - break; - } - /* For json_each() path and root are the same so fall through - ** into the root case */ - /* no break */ deliberate_fall_through + u32 n = jsonEachPathLength(p); + sqlite3_result_text64(ctx, p->path.zBuf, n, + SQLITE_TRANSIENT, SQLITE_UTF8); + break; } default: { - const char *zRoot = p->zRoot; - if( zRoot==0 ) zRoot = "$"; - sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC); + sqlite3_result_text(ctx, p->path.zBuf, p->nRoot, SQLITE_STATIC); break; } case JEACH_JSON: { - assert( i==JEACH_JSON ); - sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC); + if( p->sParse.zJson==0 ){ + sqlite3_result_blob(ctx, p->sParse.aBlob, p->sParse.nBlob, + SQLITE_STATIC); + }else{ + sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC); + } break; } } @@ -206220,86 +208227,97 @@ static int jsonEachFilter( int argc, sqlite3_value **argv ){ JsonEachCursor *p = (JsonEachCursor*)cur; - const char *z; const char *zRoot = 0; - sqlite3_int64 n; + u32 i, n, sz; UNUSED_PARAMETER(idxStr); UNUSED_PARAMETER(argc); jsonEachCursorReset(p); if( idxNum==0 ) return SQLITE_OK; - z = (const char*)sqlite3_value_text(argv[0]); - if( z==0 ) return SQLITE_OK; memset(&p->sParse, 0, sizeof(p->sParse)); p->sParse.nJPRef = 1; - if( sqlite3ValueIsOfClass(argv[0], sqlite3RCStrUnref) ){ - p->sParse.zJson = sqlite3RCStrRef((char*)z); - }else{ - n = sqlite3_value_bytes(argv[0]); - p->sParse.zJson = sqlite3RCStrNew( n+1 ); - if( p->sParse.zJson==0 ) return SQLITE_NOMEM; - memcpy(p->sParse.zJson, z, (size_t)n+1); - } - p->sParse.bJsonIsRCStr = 1; - p->zJson = p->sParse.zJson; - if( jsonParse(&p->sParse, 0) ){ - int rc = SQLITE_NOMEM; - if( p->sParse.oom==0 ){ - sqlite3_free(cur->pVtab->zErrMsg); - cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON"); - if( cur->pVtab->zErrMsg ) rc = SQLITE_ERROR; + p->sParse.db = p->db; + if( jsonFuncArgMightBeBinary(argv[0]) ){ + p->sParse.nBlob = sqlite3_value_bytes(argv[0]); + p->sParse.aBlob = (u8*)sqlite3_value_blob(argv[0]); + }else{ + p->sParse.zJson = (char*)sqlite3_value_text(argv[0]); + p->sParse.nJson = sqlite3_value_bytes(argv[0]); + if( p->sParse.zJson==0 ){ + p->i = p->iEnd = 0; + return SQLITE_OK; } - jsonEachCursorReset(p); - return rc; - }else if( p->bRecursive && jsonParseFindParents(&p->sParse) ){ - jsonEachCursorReset(p); - return SQLITE_NOMEM; - }else{ - JsonNode *pNode = 0; - if( idxNum==3 ){ - const char *zErr = 0; - zRoot = (const char*)sqlite3_value_text(argv[1]); - if( zRoot==0 ) return SQLITE_OK; - n = sqlite3_value_bytes(argv[1]); - p->zRoot = sqlite3_malloc64( n+1 ); - if( p->zRoot==0 ) return SQLITE_NOMEM; - memcpy(p->zRoot, zRoot, (size_t)n+1); - if( 
zRoot[0]!='$' ){ - zErr = zRoot; - }else{ - pNode = jsonLookupStep(&p->sParse, 0, p->zRoot+1, 0, &zErr); + if( jsonConvertTextToBlob(&p->sParse, 0) ){ + if( p->sParse.oom ){ + return SQLITE_NOMEM; } - if( zErr ){ + goto json_each_malformed_input; + } + } + if( idxNum==3 ){ + zRoot = (const char*)sqlite3_value_text(argv[1]); + if( zRoot==0 ) return SQLITE_OK; + if( zRoot[0]!='$' ){ + sqlite3_free(cur->pVtab->zErrMsg); + cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot); + jsonEachCursorReset(p); + return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM; + } + p->nRoot = sqlite3Strlen30(zRoot); + if( zRoot[1]==0 ){ + i = p->i = 0; + p->eType = 0; + }else{ + i = jsonLookupStep(&p->sParse, 0, zRoot+1, 0); + if( JSON_LOOKUP_ISERROR(i) ){ + if( i==JSON_LOOKUP_NOTFOUND ){ + p->i = 0; + p->eType = 0; + p->iEnd = 0; + return SQLITE_OK; + } sqlite3_free(cur->pVtab->zErrMsg); - cur->pVtab->zErrMsg = jsonPathSyntaxError(zErr); + cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot); jsonEachCursorReset(p); return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM; - }else if( pNode==0 ){ - return SQLITE_OK; } - }else{ - pNode = p->sParse.aNode; - } - p->iBegin = p->i = (int)(pNode - p->sParse.aNode); - p->eType = pNode->eType; - if( p->eType>=JSON_ARRAY ){ - assert( pNode->eU==0 ); - VVA( pNode->eU = 3 ); - pNode->u.iKey = 0; - p->iEnd = p->i + pNode->n + 1; - if( p->bRecursive ){ - p->eType = p->sParse.aNode[p->sParse.aUp[p->i]].eType; - if( p->i>0 && (p->sParse.aNode[p->i-1].jnFlags & JNODE_LABEL)!=0 ){ - p->i--; - } + if( p->sParse.iLabel ){ + p->i = p->sParse.iLabel; + p->eType = JSONB_OBJECT; }else{ - p->i++; - } - }else{ - p->iEnd = p->i+1; - } + p->i = i; + p->eType = JSONB_ARRAY; + } + } + jsonAppendRaw(&p->path, zRoot, p->nRoot); + }else{ + i = p->i = 0; + p->eType = 0; + p->nRoot = 1; + jsonAppendRaw(&p->path, "$", 1); + } + p->nParent = 0; + n = jsonbPayloadSize(&p->sParse, i, &sz); + p->iEnd = i+n+sz; + if( (p->sParse.aBlob[i] & 0x0f)>=JSONB_ARRAY && !p->bRecursive ){ + p->i = i + n; + p->eType = p->sParse.aBlob[i] & 0x0f; + p->aParent = sqlite3DbMallocZero(p->db, sizeof(JsonParent)); + if( p->aParent==0 ) return SQLITE_NOMEM; + p->nParent = 1; + p->nParentAlloc = 1; + p->aParent[0].iKey = 0; + p->aParent[0].iEnd = p->iEnd; + p->aParent[0].iHead = p->i; + p->aParent[0].iValue = i; } return SQLITE_OK; + +json_each_malformed_input: + sqlite3_free(cur->pVtab->zErrMsg); + cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON"); + jsonEachCursorReset(p); + return cur->pVtab->zErrMsg ? 
SQLITE_ERROR : SQLITE_NOMEM; } /* The methods of the json_each virtual table */ @@ -206368,34 +208386,57 @@ static sqlite3_module jsonTreeModule = { SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){ #ifndef SQLITE_OMIT_JSON static FuncDef aJsonFunc[] = { - JFUNCTION(json, 1, 0, jsonRemoveFunc), - JFUNCTION(json_array, -1, 0, jsonArrayFunc), - JFUNCTION(json_array_length, 1, 0, jsonArrayLengthFunc), - JFUNCTION(json_array_length, 2, 0, jsonArrayLengthFunc), - JFUNCTION(json_error_position,1, 0, jsonErrorFunc), - JFUNCTION(json_extract, -1, 0, jsonExtractFunc), - JFUNCTION(->, 2, JSON_JSON, jsonExtractFunc), - JFUNCTION(->>, 2, JSON_SQL, jsonExtractFunc), - JFUNCTION(json_insert, -1, 0, jsonSetFunc), - JFUNCTION(json_object, -1, 0, jsonObjectFunc), - JFUNCTION(json_patch, 2, 0, jsonPatchFunc), - JFUNCTION(json_quote, 1, 0, jsonQuoteFunc), - JFUNCTION(json_remove, -1, 0, jsonRemoveFunc), - JFUNCTION(json_replace, -1, 0, jsonReplaceFunc), - JFUNCTION(json_set, -1, JSON_ISSET, jsonSetFunc), - JFUNCTION(json_type, 1, 0, jsonTypeFunc), - JFUNCTION(json_type, 2, 0, jsonTypeFunc), - JFUNCTION(json_valid, 1, 0, jsonValidFunc), + /* sqlite3_result_subtype() ----, ,--- sqlite3_value_subtype() */ + /* | | */ + /* Uses cache ------, | | ,---- Returns JSONB */ + /* | | | | */ + /* Number of arguments ---, | | | | ,--- Flags */ + /* | | | | | | */ + JFUNCTION(json, 1,1,1, 0,0,0, jsonRemoveFunc), + JFUNCTION(jsonb, 1,1,0, 0,1,0, jsonRemoveFunc), + JFUNCTION(json_array, -1,0,1, 1,0,0, jsonArrayFunc), + JFUNCTION(jsonb_array, -1,0,1, 1,1,0, jsonArrayFunc), + JFUNCTION(json_array_length, 1,1,0, 0,0,0, jsonArrayLengthFunc), + JFUNCTION(json_array_length, 2,1,0, 0,0,0, jsonArrayLengthFunc), + JFUNCTION(json_error_position,1,1,0, 0,0,0, jsonErrorFunc), + JFUNCTION(json_extract, -1,1,1, 0,0,0, jsonExtractFunc), + JFUNCTION(jsonb_extract, -1,1,0, 0,1,0, jsonExtractFunc), + JFUNCTION(->, 2,1,1, 0,0,JSON_JSON, jsonExtractFunc), + JFUNCTION(->>, 2,1,0, 0,0,JSON_SQL, jsonExtractFunc), + JFUNCTION(json_insert, -1,1,1, 1,0,0, jsonSetFunc), + JFUNCTION(jsonb_insert, -1,1,0, 1,1,0, jsonSetFunc), + JFUNCTION(json_object, -1,0,1, 1,0,0, jsonObjectFunc), + JFUNCTION(jsonb_object, -1,0,1, 1,1,0, jsonObjectFunc), + JFUNCTION(json_patch, 2,1,1, 0,0,0, jsonPatchFunc), + JFUNCTION(jsonb_patch, 2,1,0, 0,1,0, jsonPatchFunc), + JFUNCTION(json_quote, 1,0,1, 1,0,0, jsonQuoteFunc), + JFUNCTION(json_remove, -1,1,1, 0,0,0, jsonRemoveFunc), + JFUNCTION(jsonb_remove, -1,1,0, 0,1,0, jsonRemoveFunc), + JFUNCTION(json_replace, -1,1,1, 1,0,0, jsonReplaceFunc), + JFUNCTION(jsonb_replace, -1,1,0, 1,1,0, jsonReplaceFunc), + JFUNCTION(json_set, -1,1,1, 1,0,JSON_ISSET, jsonSetFunc), + JFUNCTION(jsonb_set, -1,1,0, 1,1,JSON_ISSET, jsonSetFunc), + JFUNCTION(json_type, 1,1,0, 0,0,0, jsonTypeFunc), + JFUNCTION(json_type, 2,1,0, 0,0,0, jsonTypeFunc), + JFUNCTION(json_valid, 1,1,0, 0,0,0, jsonValidFunc), + JFUNCTION(json_valid, 2,1,0, 0,0,0, jsonValidFunc), #if SQLITE_DEBUG - JFUNCTION(json_parse, 1, 0, jsonParseFunc), - JFUNCTION(json_test1, 1, 0, jsonTest1Func), + JFUNCTION(json_parse, 1,1,0, 0,0,0, jsonParseFunc), #endif WAGGREGATE(json_group_array, 1, 0, 0, jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse, - SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC), + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8| + SQLITE_DETERMINISTIC), + WAGGREGATE(jsonb_group_array, 1, JSON_BLOB, 0, + jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse, + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC), 
WAGGREGATE(json_group_object, 2, 0, 0, jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse, - SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC) + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC), + WAGGREGATE(jsonb_group_object,2, JSON_BLOB, 0, + jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse, + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8| + SQLITE_DETERMINISTIC) }; sqlite3InsertBuiltinFuncs(aJsonFunc, ArraySize(aJsonFunc)); #endif @@ -207143,7 +209184,7 @@ static int nodeAcquire( ** increase its reference count and return it. */ if( (pNode = nodeHashLookup(pRtree, iNode))!=0 ){ - if( pParent && pParent!=pNode->pParent ){ + if( pParent && ALWAYS(pParent!=pNode->pParent) ){ RTREE_IS_CORRUPT(pRtree); return SQLITE_CORRUPT_VTAB; } @@ -209878,7 +211919,7 @@ static int rtreeSqlInit( } sqlite3_free(zSql); } - if( pRtree->nAux ){ + if( pRtree->nAux && rc!=SQLITE_NOMEM ){ pRtree->zReadAuxSql = sqlite3_mprintf( "SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1", zDb, zPrefix); @@ -210567,15 +212608,13 @@ static int rtreeCheckTable( check.zTab = zTab; /* Find the number of auxiliary columns */ - if( check.rc==SQLITE_OK ){ - pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab); - if( pStmt ){ - nAux = sqlite3_column_count(pStmt) - 2; - sqlite3_finalize(pStmt); - }else - if( check.rc!=SQLITE_NOMEM ){ - check.rc = SQLITE_OK; - } + pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab); + if( pStmt ){ + nAux = sqlite3_column_count(pStmt) - 2; + sqlite3_finalize(pStmt); + }else + if( check.rc!=SQLITE_NOMEM ){ + check.rc = SQLITE_OK; } /* Find number of dimensions in the rtree table. */ @@ -210630,6 +212669,7 @@ static int rtreeIntegrity( if( rc==SQLITE_OK && *pzErr ){ *pzErr = sqlite3_mprintf("In RTree %s.%s:\n%z", pRtree->zDb, pRtree->zName, *pzErr); + if( (*pzErr)==0 ) rc = SQLITE_NOMEM; } return rc; } @@ -223300,9 +225340,7 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession){ ** associated hash-tables. */ sessionDeleteTable(pSession, pSession->pTable); - /* Assert that all allocations have been freed and then free the - ** session object itself. */ - // assert( pSession->nMalloc==0 ); + /* Free the session object. */ sqlite3_free(pSession); } @@ -227515,8 +229553,11 @@ struct Fts5PhraseIter { ** created with the "columnsize=0" option. ** ** xColumnText: -** This function attempts to retrieve the text of column iCol of the -** current document. If successful, (*pz) is set to point to a buffer +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer ** containing the text in utf-8 encoding, (*pn) is set to the size in bytes ** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, ** if an error occurs, an SQLite error code is returned and the final values @@ -227526,8 +229567,10 @@ struct Fts5PhraseIter { ** Returns the number of phrases in the current query expression. ** ** xPhraseSize: -** Returns the number of tokens in phrase iPhrase of the query. Phrases -** are numbered starting from zero. +** If parameter iCol is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. 
Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. ** ** xInstCount: ** Set *pnInst to the total number of occurrences of all phrases within @@ -227543,12 +229586,13 @@ struct Fts5PhraseIter { ** Query for the details of phrase match iIdx within the current row. ** Phrase matches are numbered starting from zero, so the iIdx argument ** should be greater than or equal to zero and smaller than the value -** output by xInstCount(). +** output by xInstCount(). If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. ** -** Usually, output parameter *piPhrase is set to the phrase number, *piCol +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol ** to the column in which it occurs and *piOff the token offset of the -** first token of the phrase. Returns SQLITE_OK if successful, or an error -** code (i.e. SQLITE_NOMEM) if an error occurs. +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. ** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. @@ -227574,6 +229618,10 @@ struct Fts5PhraseIter { ** Invoking Api.xUserData() returns a copy of the pointer passed as ** the third argument to pUserData. ** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. ** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK. @@ -227688,9 +229736,42 @@ struct Fts5PhraseIter { ** ** xPhraseNextColumn() ** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. +** +** If iPhrase or iToken are less than zero, or if iPhrase is greater than +** or equal to the number of phrases in the query as reported by +** xPhraseCount(), or if iToken is equal to or greater than the number of +** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken + are both zeroed. +** +** The output text is not a copy of the query text that specified the +** token. It is the output of the tokenizer module. For tokendata=1 +** tables, this includes any embedded 0x00 and trailing data. +** +** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase hit iIdx within the +** current row. If iIdx is less than zero or greater than or equal to the +** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, +** output variable (*ppToken) is set to point to a buffer containing the +** matching document token, and (*pnToken) to the size of that buffer in +** bytes. This API is not available if the specified token matches a +** prefix query term. In that case both output variables are always set +** to 0. +** +** The output text is not a copy of the document text that was tokenized. +** It is the output of the tokenizer module. For tokendata=1 tables, this +** includes any embedded 0x00 and trailing data. 
+** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ void *(*xUserData)(Fts5Context*); @@ -227725,6 +229806,13 @@ struct Fts5ExtensionApi { int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); + + /* Below this point are iVersion>=3 only */ + int (*xQueryToken)(Fts5Context*, + int iPhrase, int iToken, + const char **ppToken, int *pnToken + ); + int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); }; /* @@ -228199,6 +230287,7 @@ struct Fts5Config { char *zContent; /* content table */ char *zContentRowid; /* "content_rowid=" option value */ int bColumnsize; /* "columnsize=" option value (dflt==1) */ + int bTokendata; /* "tokendata=" option value (dflt==0) */ int eDetail; /* FTS5_DETAIL_XXX value */ char *zContentExprlist; Fts5Tokenizer *pTok; @@ -228387,17 +230476,19 @@ struct Fts5IndexIter { /* ** Values used as part of the flags argument passed to IndexQuery(). */ -#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */ -#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */ -#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */ -#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */ +#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */ +#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */ +#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */ +#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */ /* The following are used internally by the fts5_index.c module. They are ** defined here only to make it easier to avoid clashes with the flags ** above. */ -#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010 -#define FTS5INDEX_QUERY_NOOUTPUT 0x0020 -#define FTS5INDEX_QUERY_SKIPHASH 0x0040 +#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010 +#define FTS5INDEX_QUERY_NOOUTPUT 0x0020 +#define FTS5INDEX_QUERY_SKIPHASH 0x0040 +#define FTS5INDEX_QUERY_NOTOKENDATA 0x0080 +#define FTS5INDEX_QUERY_SCANONETERM 0x0100 /* ** Create/destroy an Fts5Index object. @@ -228466,6 +230557,10 @@ static void *sqlite3Fts5StructureRef(Fts5Index*); static void sqlite3Fts5StructureRelease(void*); static int sqlite3Fts5StructureTest(Fts5Index*, void*); +/* +** Used by xInstToken(): +*/ +static int sqlite3Fts5IterToken(Fts5IndexIter*, i64, int, int, const char**, int*); /* ** Insert or remove data to or from the index. Each time a document is @@ -228543,6 +230638,13 @@ static int sqlite3Fts5IndexLoadConfig(Fts5Index *p); static int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin); static int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid); +static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter*); + +/* Used to populate hash tables for xInstToken in detail=none/column mode. */ +static int sqlite3Fts5IndexIterWriteTokendata( + Fts5IndexIter*, const char*, int, i64 iRowid, int iCol, int iOff +); + /* ** End of interface to code in fts5_index.c. 
**************************************************************************/ @@ -228648,6 +230750,7 @@ static void sqlite3Fts5HashScanNext(Fts5Hash*); static int sqlite3Fts5HashScanEof(Fts5Hash*); static void sqlite3Fts5HashScanEntry(Fts5Hash *, const char **pzTerm, /* OUT: term (nul-terminated) */ + int *pnTerm, /* OUT: Size of term in bytes */ const u8 **ppDoclist, /* OUT: pointer to doclist */ int *pnDoclist /* OUT: size of doclist in bytes */ ); @@ -228774,6 +230877,10 @@ static int sqlite3Fts5ExprClonePhrase(Fts5Expr*, int, Fts5Expr**); static int sqlite3Fts5ExprPhraseCollist(Fts5Expr *, int, const u8 **, int *); +static int sqlite3Fts5ExprQueryToken(Fts5Expr*, int, int, const char**, int*); +static int sqlite3Fts5ExprInstToken(Fts5Expr*, i64, int, int, int, int, const char**, int*); +static void sqlite3Fts5ExprClearTokens(Fts5Expr*); + /******************************************* ** The fts5_expr.c API above this point is used by the other hand-written ** C code in this module. The interfaces below this point are called by @@ -230589,6 +232696,14 @@ static int fts5HighlightCb( } if( iPos==p->iRangeEnd ){ + if( p->bOpen ){ + if( p->iter.iStart>=0 && iPos>=p->iter.iStart ){ + fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); + p->iOff = iEndOff; + } + fts5HighlightAppend(&rc, p, p->zClose, -1); + p->bOpen = 0; + } fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); p->iOff = iEndOff; } @@ -230622,8 +232737,10 @@ static void fts5HighlightFunction( ctx.zClose = (const char*)sqlite3_value_text(apVal[2]); ctx.iRangeEnd = -1; rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn); - - if( ctx.zIn ){ + if( rc==SQLITE_RANGE ){ + sqlite3_result_text(pCtx, "", -1, SQLITE_STATIC); + rc = SQLITE_OK; + }else if( ctx.zIn ){ if( rc==SQLITE_OK ){ rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter); } @@ -231190,6 +233307,7 @@ static void sqlite3Fts5BufferAppendBlob( ){ if( nData ){ if( fts5BufferGrow(pRc, pBuf, nData) ) return; + assert( pBuf->p!=0 ); memcpy(&pBuf->p[pBuf->n], pData, nData); pBuf->n += nData; } @@ -231291,6 +233409,7 @@ static int sqlite3Fts5PoslistNext64( i64 *piOff /* IN/OUT: Current offset */ ){ int i = *pi; + assert( a!=0 || i==0 ); if( i>=n ){ /* EOF */ *piOff = -1; @@ -231298,6 +233417,7 @@ static int sqlite3Fts5PoslistNext64( }else{ i64 iOff = *piOff; u32 iVal; + assert( a!=0 ); fts5FastGetVarint32(a, i, iVal); if( iVal<=1 ){ if( iVal==0 ){ @@ -231929,6 +234049,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("tokendata", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed tokendata=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bTokendata = (zArg[0]=='1'); + } + return rc; + } + *pzErr = sqlite3_mprintf("unrecognized option: \"%.*s\"", nCmd, zCmd); return SQLITE_ERROR; } @@ -232662,7 +234792,9 @@ struct Fts5ExprNode { struct Fts5ExprTerm { u8 bPrefix; /* True for a prefix term */ u8 bFirst; /* True if token must be first in column */ - char *zTerm; /* nul-terminated term */ + char *pTerm; /* Term data */ + int nQueryTerm; /* Effective size of term in bytes */ + int nFullTerm; /* Size of term in bytes incl. tokendata */ Fts5IndexIter *pIter; /* Iterator for this term */ Fts5ExprTerm *pSynonym; /* Pointer to first in list of synonyms */ }; @@ -233529,7 +235661,7 @@ static int fts5ExprNearInitAll( p->pIter = 0; } rc = sqlite3Fts5IndexQuery( - pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm), + pExpr->pIndex, p->pTerm, p->nQueryTerm, (pTerm->bPrefix ? 
FTS5INDEX_QUERY_PREFIX : 0) | (pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0), pNear->pColset, @@ -234166,7 +236298,7 @@ static void fts5ExprPhraseFree(Fts5ExprPhrase *pPhrase){ Fts5ExprTerm *pSyn; Fts5ExprTerm *pNext; Fts5ExprTerm *pTerm = &pPhrase->aTerm[i]; - sqlite3_free(pTerm->zTerm); + sqlite3_free(pTerm->pTerm); sqlite3Fts5IterClose(pTerm->pIter); for(pSyn=pTerm->pSynonym; pSyn; pSyn=pNext){ pNext = pSyn->pSynonym; @@ -234264,6 +236396,7 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( typedef struct TokenCtx TokenCtx; struct TokenCtx { Fts5ExprPhrase *pPhrase; + Fts5Config *pConfig; int rc; }; @@ -234297,8 +236430,12 @@ static int fts5ParseTokenize( rc = SQLITE_NOMEM; }else{ memset(pSyn, 0, (size_t)nByte); - pSyn->zTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer); - memcpy(pSyn->zTerm, pToken, nToken); + pSyn->pTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer); + pSyn->nFullTerm = pSyn->nQueryTerm = nToken; + if( pCtx->pConfig->bTokendata ){ + pSyn->nQueryTerm = (int)strlen(pSyn->pTerm); + } + memcpy(pSyn->pTerm, pToken, nToken); pSyn->pSynonym = pPhrase->aTerm[pPhrase->nTerm-1].pSynonym; pPhrase->aTerm[pPhrase->nTerm-1].pSynonym = pSyn; } @@ -234323,7 +236460,11 @@ static int fts5ParseTokenize( if( rc==SQLITE_OK ){ pTerm = &pPhrase->aTerm[pPhrase->nTerm++]; memset(pTerm, 0, sizeof(Fts5ExprTerm)); - pTerm->zTerm = sqlite3Fts5Strndup(&rc, pToken, nToken); + pTerm->pTerm = sqlite3Fts5Strndup(&rc, pToken, nToken); + pTerm->nFullTerm = pTerm->nQueryTerm = nToken; + if( pCtx->pConfig->bTokendata && rc==SQLITE_OK ){ + pTerm->nQueryTerm = (int)strlen(pTerm->pTerm); + } } } @@ -234390,6 +236531,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( memset(&sCtx, 0, sizeof(TokenCtx)); sCtx.pPhrase = pAppend; + sCtx.pConfig = pConfig; rc = fts5ParseStringFromToken(pToken, &z); if( rc==SQLITE_OK ){ @@ -234437,12 +236579,15 @@ static int sqlite3Fts5ExprClonePhrase( Fts5Expr **ppNew ){ int rc = SQLITE_OK; /* Return code */ - Fts5ExprPhrase *pOrig; /* The phrase extracted from pExpr */ + Fts5ExprPhrase *pOrig = 0; /* The phrase extracted from pExpr */ Fts5Expr *pNew = 0; /* Expression to return via *ppNew */ - TokenCtx sCtx = {0,0}; /* Context object for fts5ParseTokenize */ - - pOrig = pExpr->apExprPhrase[iPhrase]; - pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr)); + TokenCtx sCtx = {0,0,0}; /* Context object for fts5ParseTokenize */ + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + rc = SQLITE_RANGE; + }else{ + pOrig = pExpr->apExprPhrase[iPhrase]; + pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr)); + } if( rc==SQLITE_OK ){ pNew->apExprPhrase = (Fts5ExprPhrase**)sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase*)); @@ -234455,7 +236600,7 @@ static int sqlite3Fts5ExprClonePhrase( pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*)); } - if( rc==SQLITE_OK ){ + if( rc==SQLITE_OK && ALWAYS(pOrig!=0) ){ Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset; if( pColsetOrig ){ sqlite3_int64 nByte; @@ -234469,26 +236614,27 @@ static int sqlite3Fts5ExprClonePhrase( } } - if( pOrig->nTerm ){ - int i; /* Used to iterate through phrase terms */ - for(i=0; rc==SQLITE_OK && inTerm; i++){ - int tflags = 0; - Fts5ExprTerm *p; - for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){ - const char *zTerm = p->zTerm; - rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, (int)strlen(zTerm), - 0, 0); - tflags = FTS5_TOKEN_COLOCATED; - } - if( rc==SQLITE_OK ){ - sCtx.pPhrase->aTerm[i].bPrefix = 
pOrig->aTerm[i].bPrefix; - sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst; + if( rc==SQLITE_OK ){ + if( pOrig->nTerm ){ + int i; /* Used to iterate through phrase terms */ + sCtx.pConfig = pExpr->pConfig; + for(i=0; rc==SQLITE_OK && inTerm; i++){ + int tflags = 0; + Fts5ExprTerm *p; + for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){ + rc = fts5ParseTokenize((void*)&sCtx,tflags,p->pTerm,p->nFullTerm,0,0); + tflags = FTS5_TOKEN_COLOCATED; + } + if( rc==SQLITE_OK ){ + sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix; + sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst; + } } + }else{ + /* This happens when parsing a token or quoted phrase that contains + ** no token characters at all. (e.g ... MATCH '""'). */ + sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); } - }else{ - /* This happens when parsing a token or quoted phrase that contains - ** no token characters at all. (e.g ... MATCH '""'). */ - sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); } if( rc==SQLITE_OK && ALWAYS(sCtx.pPhrase) ){ @@ -234858,11 +237004,13 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( if( parseGrowPhraseArray(pParse) ){ fts5ExprPhraseFree(pPhrase); }else{ + Fts5ExprTerm *p = &pNear->apPhrase[0]->aTerm[ii]; + Fts5ExprTerm *pTo = &pPhrase->aTerm[0]; pParse->apPhrase[pParse->nPhrase++] = pPhrase; pPhrase->nTerm = 1; - pPhrase->aTerm[0].zTerm = sqlite3Fts5Strndup( - &pParse->rc, pNear->apPhrase[0]->aTerm[ii].zTerm, -1 - ); + pTo->pTerm = sqlite3Fts5Strndup(&pParse->rc, p->pTerm, p->nFullTerm); + pTo->nQueryTerm = p->nQueryTerm; + pTo->nFullTerm = p->nFullTerm; pRet->apChild[ii] = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, sqlite3Fts5ParseNearset(pParse, 0, pPhrase) ); @@ -235047,16 +237195,17 @@ static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){ /* Determine the maximum amount of space required. */ for(p=pTerm; p; p=p->pSynonym){ - nByte += (int)strlen(pTerm->zTerm) * 2 + 3 + 2; + nByte += pTerm->nQueryTerm * 2 + 3 + 2; } zQuoted = sqlite3_malloc64(nByte); if( zQuoted ){ int i = 0; for(p=pTerm; p; p=p->pSynonym){ - char *zIn = p->zTerm; + char *zIn = p->pTerm; + char *zEnd = &zIn[p->nQueryTerm]; zQuoted[i++] = '"'; - while( *zIn ){ + while( zInnTerm; iTerm++){ - char *zTerm = pPhrase->aTerm[iTerm].zTerm; - zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" ", zTerm); + Fts5ExprTerm *p = &pPhrase->aTerm[iTerm]; + zRet = fts5PrintfAppend(zRet, "%s%.*s", iTerm==0?"":" ", + p->nQueryTerm, p->pTerm + ); if( pPhrase->aTerm[iTerm].bPrefix ){ zRet = fts5PrintfAppend(zRet, "*"); } @@ -235536,6 +237687,17 @@ static int fts5ExprColsetTest(Fts5Colset *pColset, int iCol){ return 0; } +/* +** pToken is a buffer nToken bytes in size that may or may not contain +** an embedded 0x00 byte. If it does, return the number of bytes in +** the buffer before the 0x00. If it does not, return nToken. 
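The helper introduced here, fts5QueryTerm(), implements the rule the comment above states. A self-contained sketch of the same behaviour (the standalone name is illustrative):

/* Sketch: return the number of bytes in pToken that precede the first
** embedded 0x00 byte, or nToken if the buffer contains no 0x00. This is
** the length of the query-relevant part of a tokendata=1 token. */
static int queryTermLength(const char *pToken, int nToken){
  int ii;
  for(ii=0; ii<nToken && pToken[ii]!=0x00; ii++){}
  return ii;
}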
+*/ +static int fts5QueryTerm(const char *pToken, int nToken){ + int ii; + for(ii=0; iipExpr; int i; + int nQuery = nToken; + i64 iRowid = pExpr->pRoot->iRowid; UNUSED_PARAM2(iUnused1, iUnused2); - if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE; + if( nQuery>FTS5_MAX_TOKEN_SIZE ) nQuery = FTS5_MAX_TOKEN_SIZE; + if( pExpr->pConfig->bTokendata ){ + nQuery = fts5QueryTerm(pToken, nQuery); + } if( (tflags & FTS5_TOKEN_COLOCATED)==0 ) p->iOff++; for(i=0; inPhrase; i++){ - Fts5ExprTerm *pTerm; + Fts5ExprTerm *pT; if( p->aPopulator[i].bOk==0 ) continue; - for(pTerm=&pExpr->apExprPhrase[i]->aTerm[0]; pTerm; pTerm=pTerm->pSynonym){ - int nTerm = (int)strlen(pTerm->zTerm); - if( (nTerm==nToken || (nTermbPrefix)) - && memcmp(pTerm->zTerm, pToken, nTerm)==0 + for(pT=&pExpr->apExprPhrase[i]->aTerm[0]; pT; pT=pT->pSynonym){ + if( (pT->nQueryTerm==nQuery || (pT->nQueryTermbPrefix)) + && memcmp(pT->pTerm, pToken, pT->nQueryTerm)==0 ){ int rc = sqlite3Fts5PoslistWriterAppend( &pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff ); + if( rc==SQLITE_OK && pExpr->pConfig->bTokendata && !pT->bPrefix ){ + int iCol = p->iOff>>32; + int iTokOff = p->iOff & 0x7FFFFFFF; + rc = sqlite3Fts5IndexIterWriteTokendata( + pT->pIter, pToken, nToken, iRowid, iCol, iTokOff + ); + } if( rc ) return rc; break; } @@ -235698,6 +237871,83 @@ static int sqlite3Fts5ExprPhraseCollist( return rc; } +/* +** Does the work of the fts5_api.xQueryToken() API method. +*/ +static int sqlite3Fts5ExprQueryToken( + Fts5Expr *pExpr, + int iPhrase, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5ExprPhrase *pPhrase = 0; + + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + return SQLITE_RANGE; + } + pPhrase = pExpr->apExprPhrase[iPhrase]; + if( iToken<0 || iToken>=pPhrase->nTerm ){ + return SQLITE_RANGE; + } + + *ppOut = pPhrase->aTerm[iToken].pTerm; + *pnOut = pPhrase->aTerm[iToken].nFullTerm; + return SQLITE_OK; +} + +/* +** Does the work of the fts5_api.xInstToken() API method. +*/ +static int sqlite3Fts5ExprInstToken( + Fts5Expr *pExpr, + i64 iRowid, + int iPhrase, + int iCol, + int iOff, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5ExprPhrase *pPhrase = 0; + Fts5ExprTerm *pTerm = 0; + int rc = SQLITE_OK; + + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + return SQLITE_RANGE; + } + pPhrase = pExpr->apExprPhrase[iPhrase]; + if( iToken<0 || iToken>=pPhrase->nTerm ){ + return SQLITE_RANGE; + } + pTerm = &pPhrase->aTerm[iToken]; + if( pTerm->bPrefix==0 ){ + if( pExpr->pConfig->bTokendata ){ + rc = sqlite3Fts5IterToken( + pTerm->pIter, iRowid, iCol, iOff+iToken, ppOut, pnOut + ); + }else{ + *ppOut = pTerm->pTerm; + *pnOut = pTerm->nFullTerm; + } + } + return rc; +} + +/* +** Clear the token mappings for all Fts5IndexIter objects mannaged by +** the expression passed as the only argument. +*/ +static void sqlite3Fts5ExprClearTokens(Fts5Expr *pExpr){ + int ii; + for(ii=0; iinPhrase; ii++){ + Fts5ExprTerm *pT; + for(pT=&pExpr->apExprPhrase[ii]->aTerm[0]; pT; pT=pT->pSynonym){ + sqlite3Fts5IndexIterClearTokendata(pT->pIter); + } + } +} + /* ** 2014 August 11 ** @@ -235736,10 +237986,15 @@ struct Fts5Hash { /* ** Each entry in the hash table is represented by an object of the -** following type. Each object, its key (a nul-terminated string) and -** its current data are stored in a single memory allocation. The -** key immediately follows the object in memory. The position list -** data immediately follows the key data in memory. +** following type. 
Each object, its key, and its current data are stored +** in a single memory allocation. The key immediately follows the object +** in memory. The position list data immediately follows the key data +** in memory. +** +** The key is Fts5HashEntry.nKey bytes in size. It consists of a single +** byte identifying the index (either the main term index or a prefix-index), +** followed by the term data. For example: "0token". There is no +** nul-terminator - in this case nKey=6. ** ** The data that follows the key is in a similar, but not identical format ** to the doclist data stored in the database. It is: @@ -235874,8 +238129,7 @@ static int fts5HashResize(Fts5Hash *pHash){ unsigned int iHash; Fts5HashEntry *p = apOld[i]; apOld[i] = p->pHashNext; - iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), - (int)strlen(fts5EntryKey(p))); + iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), p->nKey); p->pHashNext = apNew[iHash]; apNew[iHash] = p; } @@ -235959,7 +238213,7 @@ static int sqlite3Fts5HashWrite( for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){ char *zKey = fts5EntryKey(p); if( zKey[0]==bByte - && p->nKey==nToken + && p->nKey==nToken+1 && memcmp(&zKey[1], pToken, nToken)==0 ){ break; @@ -235989,9 +238243,9 @@ static int sqlite3Fts5HashWrite( zKey[0] = bByte; memcpy(&zKey[1], pToken, nToken); assert( iHash==fts5HashKey(pHash->nSlot, (u8*)zKey, nToken+1) ); - p->nKey = nToken; + p->nKey = nToken+1; zKey[nToken+1] = '\0'; - p->nData = nToken+1 + 1 + sizeof(Fts5HashEntry); + p->nData = nToken+1 + sizeof(Fts5HashEntry); p->pHashNext = pHash->aSlot[iHash]; pHash->aSlot[iHash] = p; pHash->nEntry++; @@ -236108,12 +238362,17 @@ static Fts5HashEntry *fts5HashEntryMerge( *ppOut = p1; p1 = 0; }else{ - int i = 0; char *zKey1 = fts5EntryKey(p1); char *zKey2 = fts5EntryKey(p2); - while( zKey1[i]==zKey2[i] ) i++; + int nMin = MIN(p1->nKey, p2->nKey); - if( ((u8)zKey1[i])>((u8)zKey2[i]) ){ + int cmp = memcmp(zKey1, zKey2, nMin); + if( cmp==0 ){ + cmp = p1->nKey - p2->nKey; + } + assert( cmp!=0 ); + + if( cmp>0 ){ /* p2 is smaller */ *ppOut = p2; ppOut = &p2->pScanNext; @@ -236132,10 +238391,8 @@ static Fts5HashEntry *fts5HashEntryMerge( } /* -** Extract all tokens from hash table iHash and link them into a list -** in sorted order. The hash table is cleared before returning. It is -** the responsibility of the caller to free the elements of the returned -** list. +** Link all tokens from hash table iHash into a list in sorted order. The +** tokens are not removed from the hash table. 
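Because tokendata=1 keys may contain embedded 0x00 bytes, the merge above switches from a character scan to memcmp() over the shorter key length, with the key length itself as tie-breaker. The same comparison in isolation, using a hypothetical key struct:

#include <string.h>

typedef struct Key { const unsigned char *p; int n; } Key;

/* Order length-counted keys the way fts5HashEntryMerge() now does:
** compare the common prefix with memcmp(), then treat the shorter key
** as smaller. A strcmp()-style scan would stop at an embedded 0x00. */
static int keyCompare(const Key *p1, const Key *p2){
  int nMin = (p1->n < p2->n) ? p1->n : p2->n;
  int cmp = memcmp(p1->p, p2->p, nMin);
  if( cmp==0 ) cmp = p1->n - p2->n;
  return cmp;
}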
*/ static int fts5HashEntrySort( Fts5Hash *pHash, @@ -236157,7 +238414,7 @@ static int fts5HashEntrySort( Fts5HashEntry *pIter; for(pIter=pHash->aSlot[iSlot]; pIter; pIter=pIter->pHashNext){ if( pTerm==0 - || (pIter->nKey+1>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm)) + || (pIter->nKey>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm)) ){ Fts5HashEntry *pEntry = pIter; pEntry->pScanNext = 0; @@ -236196,12 +238453,11 @@ static int sqlite3Fts5HashQuery( for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){ zKey = fts5EntryKey(p); - assert( p->nKey+1==(int)strlen(zKey) ); - if( nTerm==p->nKey+1 && memcmp(zKey, pTerm, nTerm)==0 ) break; + if( nTerm==p->nKey && memcmp(zKey, pTerm, nTerm)==0 ) break; } if( p ){ - int nHashPre = sizeof(Fts5HashEntry) + nTerm + 1; + int nHashPre = sizeof(Fts5HashEntry) + nTerm; int nList = p->nData - nHashPre; u8 *pRet = (u8*)(*ppOut = sqlite3_malloc64(nPre + nList + 10)); if( pRet ){ @@ -236262,19 +238518,22 @@ static int sqlite3Fts5HashScanEof(Fts5Hash *p){ static void sqlite3Fts5HashScanEntry( Fts5Hash *pHash, const char **pzTerm, /* OUT: term (nul-terminated) */ + int *pnTerm, /* OUT: Size of term in bytes */ const u8 **ppDoclist, /* OUT: pointer to doclist */ int *pnDoclist /* OUT: size of doclist in bytes */ ){ Fts5HashEntry *p; if( (p = pHash->pScan) ){ char *zKey = fts5EntryKey(p); - int nTerm = (int)strlen(zKey); + int nTerm = p->nKey; fts5HashAddPoslistSize(pHash, p, 0); *pzTerm = zKey; - *ppDoclist = (const u8*)&zKey[nTerm+1]; - *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm + 1); + *pnTerm = nTerm; + *ppDoclist = (const u8*)&zKey[nTerm]; + *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm); }else{ *pzTerm = 0; + *pnTerm = 0; *ppDoclist = 0; *pnDoclist = 0; } @@ -236605,6 +238864,9 @@ typedef struct Fts5SegWriter Fts5SegWriter; typedef struct Fts5Structure Fts5Structure; typedef struct Fts5StructureLevel Fts5StructureLevel; typedef struct Fts5StructureSegment Fts5StructureSegment; +typedef struct Fts5TokenDataIter Fts5TokenDataIter; +typedef struct Fts5TokenDataMap Fts5TokenDataMap; +typedef struct Fts5TombstoneArray Fts5TombstoneArray; struct Fts5Data { u8 *p; /* Pointer to buffer containing record */ @@ -236639,6 +238901,7 @@ struct Fts5Index { /* Error state. */ int rc; /* Current error code */ + int flushRc; /* State used by the fts5DataXXX() functions. */ sqlite3_blob *pReader; /* RO incr-blob open on %_data table */ @@ -236647,6 +238910,7 @@ struct Fts5Index { sqlite3_stmt *pIdxWriter; /* "INSERT ... %_idx VALUES(?,?,?,?)" */ sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=?" */ sqlite3_stmt *pIdxSelect; + sqlite3_stmt *pIdxNextSelect; int nRead; /* Total number of blocks read */ sqlite3_stmt *pDeleteFromIdx; @@ -236800,8 +239064,7 @@ struct Fts5SegIter { Fts5Data *pLeaf; /* Current leaf data */ Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */ i64 iLeafOffset; /* Byte offset within current leaf */ - Fts5Data **apTombstone; /* Array of tombstone pages */ - int nTombstone; + Fts5TombstoneArray *pTombArray; /* Array of tombstone pages */ /* Next method */ void (*xNext)(Fts5Index*, Fts5SegIter*, int*); @@ -236828,6 +239091,15 @@ struct Fts5SegIter { u8 bDel; /* True if the delete flag is set */ }; +/* +** Array of tombstone pages. Reference counted. +*/ +struct Fts5TombstoneArray { + int nRef; /* Number of pointers to this object */ + int nTombstone; + Fts5Data *apTombstone[1]; /* Array of tombstone pages */ +}; + /* ** Argument is a pointer to an Fts5Data structure that contains a ** leaf page. 
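Making the tombstone-page array reference counted allows fts5SetupTokendataIter(), later in this diff, to share one array between the segment iterators it builds for synonymous tokendata terms. The retain side of the pattern, reduced to a sketch (the helper name is illustrative; the matching release is fts5TombstoneArrayDelete() below):

/* Share an existing tombstone array with another iterator by taking a
** new reference. The last fts5TombstoneArrayDelete() frees it. */
static Fts5TombstoneArray *tombstoneArrayRef(Fts5TombstoneArray *p){
  if( p ) p->nRef++;
  return p;
}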
@@ -236872,9 +239144,16 @@ struct Fts5SegIter { ** poslist: ** Used by sqlite3Fts5IterPoslist() when the poslist needs to be buffered. ** There is no way to tell if this is populated or not. +** +** pColset: +** If not NULL, points to an object containing a set of column indices. +** Only matches that occur in one of these columns will be returned. +** The Fts5Iter does not own the Fts5Colset object, and so it is not +** freed when the iterator is closed - it is owned by the upper layer. */ struct Fts5Iter { Fts5IndexIter base; /* Base class containing output vars */ + Fts5TokenDataIter *pTokenDataIter; Fts5Index *pIndex; /* Index that owns this iterator */ Fts5Buffer poslist; /* Buffer containing current poslist */ @@ -236892,7 +239171,6 @@ struct Fts5Iter { Fts5SegIter aSeg[1]; /* Array of segment iterators */ }; - /* ** An instance of the following type is used to iterate through the contents ** of a doclist-index record. @@ -237810,9 +240088,9 @@ static int fts5DlidxLvlNext(Fts5DlidxLvl *pLvl){ } if( iOffnn ){ - i64 iVal; + u64 iVal; pLvl->iLeafPgno += (iOff - pLvl->iOff) + 1; - iOff += fts5GetVarint(&pData->p[iOff], (u64*)&iVal); + iOff += fts5GetVarint(&pData->p[iOff], &iVal); pLvl->iRowid += iVal; pLvl->iOff = iOff; }else{ @@ -238191,18 +240469,20 @@ static void fts5SegIterSetNext(Fts5Index *p, Fts5SegIter *pIter){ } /* -** Allocate a tombstone hash page array (pIter->apTombstone) for the -** iterator passed as the second argument. If an OOM error occurs, leave -** an error in the Fts5Index object. +** Allocate a tombstone hash page array object (pIter->pTombArray) for +** the iterator passed as the second argument. If an OOM error occurs, +** leave an error in the Fts5Index object. */ static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){ const int nTomb = pIter->pSeg->nPgTombstone; if( nTomb>0 ){ - Fts5Data **apTomb = 0; - apTomb = (Fts5Data**)sqlite3Fts5MallocZero(&p->rc, sizeof(Fts5Data)*nTomb); - if( apTomb ){ - pIter->apTombstone = apTomb; - pIter->nTombstone = nTomb; + int nByte = nTomb * sizeof(Fts5Data*) + sizeof(Fts5TombstoneArray); + Fts5TombstoneArray *pNew; + pNew = (Fts5TombstoneArray*)sqlite3Fts5MallocZero(&p->rc, nByte); + if( pNew ){ + pNew->nTombstone = nTomb; + pNew->nRef = 1; + pIter->pTombArray = pNew; } } } @@ -238459,15 +240739,16 @@ static void fts5SegIterNext_None( }else{ const u8 *pList = 0; const char *zTerm = 0; + int nTerm = 0; int nList; sqlite3Fts5HashScanNext(p->pHash); - sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList); + sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList); if( pList==0 ) goto next_none_eof; pIter->pLeaf->p = (u8*)pList; pIter->pLeaf->nn = nList; pIter->pLeaf->szLeaf = nList; pIter->iEndofDoclist = nList; - sqlite3Fts5BufferSet(&p->rc,&pIter->term, (int)strlen(zTerm), (u8*)zTerm); + sqlite3Fts5BufferSet(&p->rc,&pIter->term, nTerm, (u8*)zTerm); pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid); } @@ -238533,11 +240814,12 @@ static void fts5SegIterNext( }else if( pIter->pSeg==0 ){ const u8 *pList = 0; const char *zTerm = 0; + int nTerm = 0; int nList = 0; assert( (pIter->flags & FTS5_SEGITER_ONETERM) || pbNewTerm ); if( 0==(pIter->flags & FTS5_SEGITER_ONETERM) ){ sqlite3Fts5HashScanNext(p->pHash); - sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList); + sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList); } if( pList==0 ){ fts5DataRelease(pIter->pLeaf); @@ -238547,8 +240829,7 @@ static void fts5SegIterNext( pIter->pLeaf->nn = nList; pIter->pLeaf->szLeaf = 
nList; pIter->iEndofDoclist = nList+1; - sqlite3Fts5BufferSet(&p->rc, &pIter->term, (int)strlen(zTerm), - (u8*)zTerm); + sqlite3Fts5BufferSet(&p->rc, &pIter->term, nTerm, (u8*)zTerm); pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid); *pbNewTerm = 1; } @@ -238934,7 +241215,7 @@ static void fts5SegIterSeekInit( fts5LeafSeek(p, bGe, pIter, pTerm, nTerm); } - if( p->rc==SQLITE_OK && bGe==0 ){ + if( p->rc==SQLITE_OK && (bGe==0 || (flags & FTS5INDEX_QUERY_SCANONETERM)) ){ pIter->flags |= FTS5_SEGITER_ONETERM; if( pIter->pLeaf ){ if( flags & FTS5INDEX_QUERY_DESC ){ @@ -238950,7 +241231,9 @@ static void fts5SegIterSeekInit( } fts5SegIterSetNext(p, pIter); - fts5SegIterAllocTombstone(p, pIter); + if( 0==(flags & FTS5INDEX_QUERY_SCANONETERM) ){ + fts5SegIterAllocTombstone(p, pIter); + } /* Either: ** @@ -238967,6 +241250,79 @@ static void fts5SegIterSeekInit( ); } + +/* +** SQL used by fts5SegIterNextInit() to find the page to open. +*/ +static sqlite3_stmt *fts5IdxNextStmt(Fts5Index *p){ + if( p->pIdxNextSelect==0 ){ + Fts5Config *pConfig = p->pConfig; + fts5IndexPrepareStmt(p, &p->pIdxNextSelect, sqlite3_mprintf( + "SELECT pgno FROM '%q'.'%q_idx' WHERE " + "segid=? AND term>? ORDER BY term ASC LIMIT 1", + pConfig->zDb, pConfig->zName + )); + + } + return p->pIdxNextSelect; +} + +/* +** This is similar to fts5SegIterSeekInit(), except that it initializes +** the segment iterator to point to the first term following the page +** with pToken/nToken on it. +*/ +static void fts5SegIterNextInit( + Fts5Index *p, + const char *pTerm, int nTerm, + Fts5StructureSegment *pSeg, /* Description of segment */ + Fts5SegIter *pIter /* Object to populate */ +){ + int iPg = -1; /* Page of segment to open */ + int bDlidx = 0; + sqlite3_stmt *pSel = 0; /* SELECT to find iPg */ + + pSel = fts5IdxNextStmt(p); + if( pSel ){ + assert( p->rc==SQLITE_OK ); + sqlite3_bind_int(pSel, 1, pSeg->iSegid); + sqlite3_bind_blob(pSel, 2, pTerm, nTerm, SQLITE_STATIC); + + if( sqlite3_step(pSel)==SQLITE_ROW ){ + i64 val = sqlite3_column_int64(pSel, 0); + iPg = (int)(val>>1); + bDlidx = (val & 0x0001); + } + p->rc = sqlite3_reset(pSel); + sqlite3_bind_null(pSel, 2); + if( p->rc ) return; + } + + memset(pIter, 0, sizeof(*pIter)); + pIter->pSeg = pSeg; + pIter->flags |= FTS5_SEGITER_ONETERM; + if( iPg>=0 ){ + pIter->iLeafPgno = iPg - 1; + fts5SegIterNextPage(p, pIter); + fts5SegIterSetNext(p, pIter); + } + if( pIter->pLeaf ){ + const u8 *a = pIter->pLeaf->p; + int iTermOff = 0; + + pIter->iPgidxOff = pIter->pLeaf->szLeaf; + pIter->iPgidxOff += fts5GetVarint32(&a[pIter->iPgidxOff], iTermOff); + pIter->iLeafOffset = iTermOff; + fts5SegIterLoadTerm(p, pIter, 0); + fts5SegIterLoadNPos(p, pIter); + if( bDlidx ) fts5SegIterLoadDlidx(p, pIter); + + assert( p->rc!=SQLITE_OK || + fts5BufferCompareBlob(&pIter->term, (const u8*)pTerm, nTerm)>0 + ); + } +} + /* ** Initialize the object pIter to point to term pTerm/nTerm within the ** in-memory hash table. If there is no such term in the hash-table, the @@ -238993,14 +241349,21 @@ static void fts5SegIterHashInit( const u8 *pList = 0; p->rc = sqlite3Fts5HashScanInit(p->pHash, (const char*)pTerm, nTerm); - sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &pList, &nList); - n = (z ? 
(int)strlen((const char*)z) : 0); + sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &n, &pList, &nList); if( pList ){ pLeaf = fts5IdxMalloc(p, sizeof(Fts5Data)); if( pLeaf ){ pLeaf->p = (u8*)pList; } } + + /* The call to sqlite3Fts5HashScanInit() causes the hash table to + ** fill the size field of all existing position lists. This means they + ** can no longer be appended to. Since the only scenario in which they + ** can be appended to is if the previous operation on this table was + ** a DELETE, by clearing the Fts5Index.bDelete flag we can avoid this + ** possibility altogether. */ + p->bDelete = 0; }else{ p->rc = sqlite3Fts5HashQuery(p->pHash, sizeof(Fts5Data), (const char*)pTerm, nTerm, (void**)&pLeaf, &nList @@ -239045,6 +241408,23 @@ static void fts5IndexFreeArray(Fts5Data **ap, int n){ } } +/* +** Decrement the ref-count of the object passed as the only argument. If it +** reaches 0, free it and its contents. +*/ +static void fts5TombstoneArrayDelete(Fts5TombstoneArray *p){ + if( p ){ + p->nRef--; + if( p->nRef<=0 ){ + int ii; + for(ii=0; iinTombstone; ii++){ + fts5DataRelease(p->apTombstone[ii]); + } + sqlite3_free(p); + } + } +} + /* ** Zero the iterator passed as the only argument. */ @@ -239052,7 +241432,7 @@ static void fts5SegIterClear(Fts5SegIter *pIter){ fts5BufferFree(&pIter->term); fts5DataRelease(pIter->pLeaf); fts5DataRelease(pIter->pNextLeaf); - fts5IndexFreeArray(pIter->apTombstone, pIter->nTombstone); + fts5TombstoneArrayDelete(pIter->pTombArray); fts5DlidxIterFree(pIter->pDlidx); sqlite3_free(pIter->aRowidOffset); memset(pIter, 0, sizeof(Fts5SegIter)); @@ -239297,7 +241677,6 @@ static void fts5SegIterNextFrom( }while( p->rc==SQLITE_OK ); } - /* ** Free the iterator object passed as the second argument. */ @@ -239442,24 +241821,25 @@ static int fts5IndexTombstoneQuery( static int fts5MultiIterIsDeleted(Fts5Iter *pIter){ int iFirst = pIter->aFirst[1].iFirst; Fts5SegIter *pSeg = &pIter->aSeg[iFirst]; + Fts5TombstoneArray *pArray = pSeg->pTombArray; - if( pSeg->pLeaf && pSeg->nTombstone ){ + if( pSeg->pLeaf && pArray ){ /* Figure out which page the rowid might be present on. */ - int iPg = ((u64)pSeg->iRowid) % pSeg->nTombstone; + int iPg = ((u64)pSeg->iRowid) % pArray->nTombstone; assert( iPg>=0 ); /* If tombstone hash page iPg has not yet been loaded from the ** database, load it now. */ - if( pSeg->apTombstone[iPg]==0 ){ - pSeg->apTombstone[iPg] = fts5DataRead(pIter->pIndex, + if( pArray->apTombstone[iPg]==0 ){ + pArray->apTombstone[iPg] = fts5DataRead(pIter->pIndex, FTS5_TOMBSTONE_ROWID(pSeg->pSeg->iSegid, iPg) ); - if( pSeg->apTombstone[iPg]==0 ) return 0; + if( pArray->apTombstone[iPg]==0 ) return 0; } return fts5IndexTombstoneQuery( - pSeg->apTombstone[iPg], - pSeg->nTombstone, + pArray->apTombstone[iPg], + pArray->nTombstone, pSeg->iRowid ); } @@ -239998,6 +242378,32 @@ static void fts5IterSetOutputCb(int *pRc, Fts5Iter *pIter){ } } +/* +** All the component segment-iterators of pIter have been set up. This +** functions finishes setup for iterator pIter itself. 
+*/ +static void fts5MultiIterFinishSetup(Fts5Index *p, Fts5Iter *pIter){ + int iIter; + for(iIter=pIter->nSeg-1; iIter>0; iIter--){ + int iEq; + if( (iEq = fts5MultiIterDoCompare(pIter, iIter)) ){ + Fts5SegIter *pSeg = &pIter->aSeg[iEq]; + if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0); + fts5MultiIterAdvanced(p, pIter, iEq, iIter); + } + } + fts5MultiIterSetEof(pIter); + fts5AssertMultiIterSetup(p, pIter); + + if( (pIter->bSkipEmpty && fts5MultiIterIsEmpty(p, pIter)) + || fts5MultiIterIsDeleted(pIter) + ){ + fts5MultiIterNext(p, pIter, 0, 0); + }else if( pIter->base.bEof==0 ){ + Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst]; + pIter->xSetOutputs(pIter, pSeg); + } +} /* ** Allocate a new Fts5Iter object. @@ -240079,31 +242485,12 @@ static void fts5MultiIterNew( assert( iIter==nSeg ); } - /* If the above was successful, each component iterators now points + /* If the above was successful, each component iterator now points ** to the first entry in its segment. In this case initialize the ** aFirst[] array. Or, if an error has occurred, free the iterator ** object and set the output variable to NULL. */ if( p->rc==SQLITE_OK ){ - for(iIter=pNew->nSeg-1; iIter>0; iIter--){ - int iEq; - if( (iEq = fts5MultiIterDoCompare(pNew, iIter)) ){ - Fts5SegIter *pSeg = &pNew->aSeg[iEq]; - if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0); - fts5MultiIterAdvanced(p, pNew, iEq, iIter); - } - } - fts5MultiIterSetEof(pNew); - fts5AssertMultiIterSetup(p, pNew); - - if( (pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew)) - || fts5MultiIterIsDeleted(pNew) - ){ - fts5MultiIterNext(p, pNew, 0, 0); - }else if( pNew->base.bEof==0 ){ - Fts5SegIter *pSeg = &pNew->aSeg[pNew->aFirst[1].iFirst]; - pNew->xSetOutputs(pNew, pSeg); - } - + fts5MultiIterFinishSetup(p, pNew); }else{ fts5MultiIterFree(pNew); *ppOut = 0; @@ -240128,7 +242515,6 @@ static void fts5MultiIterNew2( pNew = fts5MultiIterAlloc(p, 2); if( pNew ){ Fts5SegIter *pIter = &pNew->aSeg[1]; - pIter->flags = FTS5_SEGITER_ONETERM; if( pData->szLeaf>0 ){ pIter->pLeaf = pData; @@ -240276,6 +242662,7 @@ static void fts5IndexDiscardData(Fts5Index *p){ sqlite3Fts5HashClear(p->pHash); p->nPendingData = 0; p->nPendingRow = 0; + p->flushRc = SQLITE_OK; } p->nContentlessDelete = 0; } @@ -240491,7 +242878,7 @@ static void fts5WriteDlidxAppend( } if( pDlidx->bPrevValid ){ - iVal = iRowid - pDlidx->iPrev; + iVal = (u64)iRowid - (u64)pDlidx->iPrev; }else{ i64 iPgno = (i==0 ? pWriter->writer.pgno : pDlidx[-1].pgno); assert( pDlidx->buf.n==0 ); @@ -240678,7 +243065,7 @@ static void fts5WriteAppendPoslistData( const u8 *a = aData; int n = nData; - assert( p->pConfig->pgsz>0 ); + assert( p->pConfig->pgsz>0 || p->rc!=SQLITE_OK ); while( p->rc==SQLITE_OK && (pPage->buf.n + pPage->pgidx.n + n)>=p->pConfig->pgsz ){ @@ -241411,18 +243798,24 @@ static void fts5DoSecureDelete( iOff = iStart; - /* Set variable bLastInDoclist to true if this entry happens to be - ** the last rowid in the doclist for its term. */ + /* If the position-list for the entry being removed flows over past + ** the end of this page, delete the portion of the position-list on the + ** next page and beyond. + ** + ** Set variable bLastInDoclist to true if this entry happens + ** to be the last rowid in the doclist for its term. 
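One small change in this region is easy to miss: fts5WriteDlidxAppend() now computes the rowid delta as (u64)iRowid - (u64)pDlidx->iPrev. Signed 64-bit subtraction can overflow, which is undefined behaviour in C, while unsigned wrap-around is well defined and round-trips through the varint encoding. A reduced sketch of the idiom:

#include <stdint.h>

/* Compute a rowid delta without risking signed overflow: cast both
** operands to uint64_t so the subtraction wraps (defined behaviour).
** The reader reverses it with the matching unsigned addition. */
static uint64_t rowidDelta(int64_t iRowid, int64_t iPrev){
  return (uint64_t)iRowid - (uint64_t)iPrev;
}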
*/ + if( iNextOff>=iPgIdx ){ + int pgno = pSeg->iLeafPgno+1; + fts5SecureDeleteOverflow(p, pSeg->pSeg, pgno, &bLastInDoclist); + iNextOff = iPgIdx; + } + if( pSeg->bDel==0 ){ - if( iNextOff>=iPgIdx ){ - int pgno = pSeg->iLeafPgno+1; - fts5SecureDeleteOverflow(p, pSeg->pSeg, pgno, &bLastInDoclist); - iNextOff = iPgIdx; - }else{ + if( iNextOff!=iPgIdx ){ /* Loop through the page-footer. If iNextOff (offset of the ** entry following the one we are removing) is equal to the ** offset of a key on this page, then the entry is the last - ** in its doclist. */ + ** in its doclist. */ int iKeyOff = 0; for(iIdx=0; iIdxrc!=SQLITE_OK ) break; @@ -241716,7 +244108,7 @@ static void fts5FlushOneHash(Fts5Index *p){ if( bSecureDelete ){ if( eDetail==FTS5_DETAIL_NONE ){ if( iOffrc!=SQLITE_OK || pDoclist[iOff]==0x01 ){ iOff++; continue; @@ -241852,6 +244244,10 @@ static void fts5FlushOneHash(Fts5Index *p){ */ static void fts5IndexFlush(Fts5Index *p){ /* Unless it is empty, flush the hash table to disk */ + if( p->flushRc ){ + p->rc = p->flushRc; + return; + } if( p->nPendingData || p->nContentlessDelete ){ assert( p->pHash ); fts5FlushOneHash(p); @@ -241860,6 +244256,8 @@ static void fts5IndexFlush(Fts5Index *p){ p->nPendingData = 0; p->nPendingRow = 0; p->nContentlessDelete = 0; + }else if( p->nPendingData || p->nContentlessDelete ){ + p->flushRc = p->rc; } } } @@ -241938,8 +244336,9 @@ static int sqlite3Fts5IndexOptimize(Fts5Index *p){ assert( p->rc==SQLITE_OK ); fts5IndexFlush(p); - assert( p->nContentlessDelete==0 ); + assert( p->rc!=SQLITE_OK || p->nContentlessDelete==0 ); pStruct = fts5StructureRead(p); + assert( p->rc!=SQLITE_OK || pStruct!=0 ); fts5StructureInvalidate(p); if( pStruct ){ @@ -242345,7 +244744,7 @@ static void fts5SetupPrefixIter( u8 *pToken, /* Buffer containing prefix to match */ int nToken, /* Size of buffer pToken in bytes */ Fts5Colset *pColset, /* Restrict matches to these columns */ - Fts5Iter **ppIter /* OUT: New iterator */ + Fts5Iter **ppIter /* OUT: New iterator */ ){ Fts5Structure *pStruct; Fts5Buffer *aBuf; @@ -242366,8 +244765,9 @@ static void fts5SetupPrefixIter( aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf); pStruct = fts5StructureRead(p); + assert( p->rc!=SQLITE_OK || (aBuf && pStruct) ); - if( aBuf && pStruct ){ + if( p->rc==SQLITE_OK ){ const int flags = FTS5INDEX_QUERY_SCAN | FTS5INDEX_QUERY_SKIPEMPTY | FTS5INDEX_QUERY_NOOUTPUT; @@ -242379,6 +244779,12 @@ static void fts5SetupPrefixIter( int bNewTerm = 1; memset(&doclist, 0, sizeof(doclist)); + + /* If iIdx is non-zero, then it is the number of a prefix-index for + ** prefixes 1 character longer than the prefix being queried for. That + ** index contains all the doclists required, except for the one + ** corresponding to the prefix itself. That one is extracted from the + ** main term index here. 
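The prefix-index arrangement the comment above relies on can also be seen at the SQL level; the names below are examples only. With prefix='3', a query such as 'ab*' is answered from the prefix-3 index (which covers every term of three or more characters starting with "ab") plus the main-index doclist for the two-character term "ab" itself - the case the comment describes:

#include <sqlite3.h>

/* Declare a prefix index over 3-character prefixes. */
static int createPrefixTable(sqlite3 *db){
  return sqlite3_exec(db,
      "CREATE VIRTUAL TABLE ft3 USING fts5(body, prefix='3')", 0, 0, 0);
}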
*/ if( iIdx!=0 ){ int dummy = 0; const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT; @@ -242402,6 +244808,7 @@ static void fts5SetupPrefixIter( pToken[0] = FTS5_MAIN_PREFIX + iIdx; fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); fts5IterSetOutputCb(&p->rc, p1); + for( /* no-op */ ; fts5MultiIterEof(p, p1)==0; fts5MultiIterNext2(p, p1, &bNewTerm) @@ -242417,7 +244824,6 @@ static void fts5SetupPrefixIter( } if( p1->base.nData==0 ) continue; - if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){ for(i=0; p->rc==SQLITE_OK && doclist.n; i++){ int i1 = i*nMerge; @@ -242456,7 +244862,7 @@ static void fts5SetupPrefixIter( } fts5MultiIterFree(p1); - pData = fts5IdxMalloc(p, sizeof(Fts5Data)+doclist.n+FTS5_DATA_ZERO_PADDING); + pData = fts5IdxMalloc(p, sizeof(*pData)+doclist.n+FTS5_DATA_ZERO_PADDING); if( pData ){ pData->p = (u8*)&pData[1]; pData->nn = pData->szLeaf = doclist.n; @@ -242599,6 +245005,7 @@ static int sqlite3Fts5IndexClose(Fts5Index *p){ sqlite3_finalize(p->pIdxWriter); sqlite3_finalize(p->pIdxDeleter); sqlite3_finalize(p->pIdxSelect); + sqlite3_finalize(p->pIdxNextSelect); sqlite3_finalize(p->pDataVersion); sqlite3_finalize(p->pDeleteFromIdx); sqlite3Fts5HashFree(p->pHash); @@ -242694,6 +245101,454 @@ static int sqlite3Fts5IndexWrite( return rc; } +/* +** pToken points to a buffer of size nToken bytes containing a search +** term, including the index number at the start, used on a tokendata=1 +** table. This function returns true if the term in buffer pBuf matches +** token pToken/nToken. +*/ +static int fts5IsTokendataPrefix( + Fts5Buffer *pBuf, + const u8 *pToken, + int nToken +){ + return ( + pBuf->n>=nToken + && 0==memcmp(pBuf->p, pToken, nToken) + && (pBuf->n==nToken || pBuf->p[nToken]==0x00) + ); +} + +/* +** Ensure the segment-iterator passed as the only argument points to EOF. +*/ +static void fts5SegIterSetEOF(Fts5SegIter *pSeg){ + fts5DataRelease(pSeg->pLeaf); + pSeg->pLeaf = 0; +} + +/* +** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an +** array of these for each row it visits. Or, for an iterator used by an +** "ORDER BY rank" query, it accumulates an array of these for the entire +** query. +** +** Each instance in the array indicates the iterator (and therefore term) +** associated with position iPos of rowid iRowid. This is used by the +** xInstToken() API. +*/ +struct Fts5TokenDataMap { + i64 iRowid; /* Row this token is located in */ + i64 iPos; /* Position of token */ + int iIter; /* Iterator token was read from */ +}; + +/* +** An object used to supplement Fts5Iter for tokendata=1 iterators. +*/ +struct Fts5TokenDataIter { + int nIter; + int nIterAlloc; + + int nMap; + int nMapAlloc; + Fts5TokenDataMap *aMap; + + Fts5PoslistReader *aPoslistReader; + int *aPoslistToIter; + Fts5Iter *apIter[1]; +}; + +/* +** This function appends iterator pAppend to Fts5TokenDataIter pIn and +** returns the result. +*/ +static Fts5TokenDataIter *fts5AppendTokendataIter( + Fts5Index *p, /* Index object (for error code) */ + Fts5TokenDataIter *pIn, /* Current Fts5TokenDataIter struct */ + Fts5Iter *pAppend /* Append this iterator */ +){ + Fts5TokenDataIter *pRet = pIn; + + if( p->rc==SQLITE_OK ){ + if( pIn==0 || pIn->nIter==pIn->nIterAlloc ){ + int nAlloc = pIn ? 
pIn->nIterAlloc*2 : 16; + int nByte = nAlloc * sizeof(Fts5Iter*) + sizeof(Fts5TokenDataIter); + Fts5TokenDataIter *pNew = (Fts5TokenDataIter*)sqlite3_realloc(pIn, nByte); + + if( pNew==0 ){ + p->rc = SQLITE_NOMEM; + }else{ + if( pIn==0 ) memset(pNew, 0, nByte); + pRet = pNew; + pNew->nIterAlloc = nAlloc; + } + } + } + if( p->rc ){ + sqlite3Fts5IterClose((Fts5IndexIter*)pAppend); + }else{ + pRet->apIter[pRet->nIter++] = pAppend; + } + assert( pRet==0 || pRet->nIter<=pRet->nIterAlloc ); + + return pRet; +} + +/* +** Delete an Fts5TokenDataIter structure and its contents. +*/ +static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){ + if( pSet ){ + int ii; + for(ii=0; iinIter; ii++){ + fts5MultiIterFree(pSet->apIter[ii]); + } + sqlite3_free(pSet->aPoslistReader); + sqlite3_free(pSet->aMap); + sqlite3_free(pSet); + } +} + +/* +** Append a mapping to the token-map belonging to object pT. +*/ +static void fts5TokendataIterAppendMap( + Fts5Index *p, + Fts5TokenDataIter *pT, + int iIter, + i64 iRowid, + i64 iPos +){ + if( p->rc==SQLITE_OK ){ + if( pT->nMap==pT->nMapAlloc ){ + int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64; + int nByte = nNew * sizeof(Fts5TokenDataMap); + Fts5TokenDataMap *aNew; + + aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nByte); + if( aNew==0 ){ + p->rc = SQLITE_NOMEM; + return; + } + + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pT->aMap[pT->nMap].iRowid = iRowid; + pT->aMap[pT->nMap].iPos = iPos; + pT->aMap[pT->nMap].iIter = iIter; + pT->nMap++; + } +} + +/* +** The iterator passed as the only argument must be a tokendata=1 iterator +** (pIter->pTokenDataIter!=0). This function sets the iterator output +** variables (pIter->base.*) according to the contents of the current +** row. +*/ +static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){ + int ii; + int nHit = 0; + i64 iRowid = SMALLEST_INT64; + int iMin = 0; + + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + + pIter->base.nData = 0; + pIter->base.pData = 0; + + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( p->base.bEof==0 ){ + if( nHit==0 || p->base.iRowidbase.iRowid; + nHit = 1; + pIter->base.pData = p->base.pData; + pIter->base.nData = p->base.nData; + iMin = ii; + }else if( p->base.iRowid==iRowid ){ + nHit++; + } + } + } + + if( nHit==0 ){ + pIter->base.bEof = 1; + }else{ + int eDetail = pIter->pIndex->pConfig->eDetail; + pIter->base.bEof = 0; + pIter->base.iRowid = iRowid; + + if( nHit==1 && eDetail==FTS5_DETAIL_FULL ){ + fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, iRowid, -1); + }else + if( nHit>1 && eDetail!=FTS5_DETAIL_NONE ){ + int nReader = 0; + int nByte = 0; + i64 iPrev = 0; + + /* Allocate array of iterators if they are not already allocated. 
*/ + if( pT->aPoslistReader==0 ){ + pT->aPoslistReader = (Fts5PoslistReader*)sqlite3Fts5MallocZero( + &pIter->pIndex->rc, + pT->nIter * (sizeof(Fts5PoslistReader) + sizeof(int)) + ); + if( pT->aPoslistReader==0 ) return; + pT->aPoslistToIter = (int*)&pT->aPoslistReader[pT->nIter]; + } + + /* Populate an iterator for each poslist that will be merged */ + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( iRowid==p->base.iRowid ){ + pT->aPoslistToIter[nReader] = ii; + sqlite3Fts5PoslistReaderInit( + p->base.pData, p->base.nData, &pT->aPoslistReader[nReader++] + ); + nByte += p->base.nData; + } + } + + /* Ensure the output buffer is large enough */ + if( fts5BufferGrow(&pIter->pIndex->rc, &pIter->poslist, nByte+nHit*10) ){ + return; + } + + /* Ensure the token-mapping is large enough */ + if( eDetail==FTS5_DETAIL_FULL && pT->nMapAlloc<(pT->nMap + nByte) ){ + int nNew = (pT->nMapAlloc + nByte) * 2; + Fts5TokenDataMap *aNew = (Fts5TokenDataMap*)sqlite3_realloc( + pT->aMap, nNew*sizeof(Fts5TokenDataMap) + ); + if( aNew==0 ){ + pIter->pIndex->rc = SQLITE_NOMEM; + return; + } + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pIter->poslist.n = 0; + + while( 1 ){ + i64 iMinPos = LARGEST_INT64; + + /* Find smallest position */ + iMin = 0; + for(ii=0; iiaPoslistReader[ii]; + if( pReader->bEof==0 ){ + if( pReader->iPosiPos; + iMin = ii; + } + } + } + + /* If all readers were at EOF, break out of the loop. */ + if( iMinPos==LARGEST_INT64 ) break; + + sqlite3Fts5PoslistSafeAppend(&pIter->poslist, &iPrev, iMinPos); + sqlite3Fts5PoslistReaderNext(&pT->aPoslistReader[iMin]); + + if( eDetail==FTS5_DETAIL_FULL ){ + pT->aMap[pT->nMap].iPos = iMinPos; + pT->aMap[pT->nMap].iIter = pT->aPoslistToIter[iMin]; + pT->aMap[pT->nMap].iRowid = iRowid; + pT->nMap++; + } + } + + pIter->base.pData = pIter->poslist.p; + pIter->base.nData = pIter->poslist.n; + } + } +} + +/* +** The iterator passed as the only argument must be a tokendata=1 iterator +** (pIter->pTokenDataIter!=0). This function advances the iterator. If +** argument bFrom is false, then the iterator is advanced to the next +** entry. Or, if bFrom is true, it is advanced to the first entry with +** a rowid of iFrom or greater. +*/ +static void fts5TokendataIterNext(Fts5Iter *pIter, int bFrom, i64 iFrom){ + int ii; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( p->base.bEof==0 + && (p->base.iRowid==pIter->base.iRowid || (bFrom && p->base.iRowidpIndex, p, bFrom, iFrom); + while( bFrom && p->base.bEof==0 + && p->base.iRowidpIndex->rc==SQLITE_OK + ){ + fts5MultiIterNext(p->pIndex, p, 0, 0); + } + } + } + + fts5IterSetOutputsTokendata(pIter); +} + +/* +** If the segment-iterator passed as the first argument is at EOF, then +** set pIter->term to a copy of buffer pTerm. +*/ +static void fts5TokendataSetTermIfEof(Fts5Iter *pIter, Fts5Buffer *pTerm){ + if( pIter && pIter->aSeg[0].pLeaf==0 ){ + fts5BufferSet(&pIter->pIndex->rc, &pIter->aSeg[0].term, pTerm->n, pTerm->p); + } +} + +/* +** This function sets up an iterator to use for a non-prefix query on a +** tokendata=1 table. 
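The poslist-merging loop above is a k-way merge: repeatedly pick the reader with the smallest next position, emit it, and advance only that reader. Stripped of the FTS5 varint and buffer machinery, the control flow looks like this (types and names are illustrative):

#include <stdint.h>

typedef struct Reader { const int64_t *a; int n; int i; } Reader;

/* Merge k ascending arrays by repeatedly emitting the smallest head,
** mirroring the iMinPos loop above. xEmit receives values in order. */
static void mergeK(Reader *aRdr, int k, void (*xEmit)(int64_t)){
  for(;;){
    int iMin = -1;
    int ii;
    for(ii=0; ii<k; ii++){
      if( aRdr[ii].i<aRdr[ii].n
       && (iMin<0 || aRdr[ii].a[aRdr[ii].i]<aRdr[iMin].a[aRdr[iMin].i])
      ){
        iMin = ii;
      }
    }
    if( iMin<0 ) break;                 /* every reader is at EOF */
    xEmit(aRdr[iMin].a[aRdr[iMin].i++]);
  }
}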
+*/ +static Fts5Iter *fts5SetupTokendataIter( + Fts5Index *p, /* FTS index to query */ + const u8 *pToken, /* Buffer containing query term */ + int nToken, /* Size of buffer pToken in bytes */ + Fts5Colset *pColset /* Colset to filter on */ +){ + Fts5Iter *pRet = 0; + Fts5TokenDataIter *pSet = 0; + Fts5Structure *pStruct = 0; + const int flags = FTS5INDEX_QUERY_SCANONETERM | FTS5INDEX_QUERY_SCAN; + + Fts5Buffer bSeek = {0, 0, 0}; + Fts5Buffer *pSmall = 0; + + fts5IndexFlush(p); + pStruct = fts5StructureRead(p); + + while( p->rc==SQLITE_OK ){ + Fts5Iter *pPrev = pSet ? pSet->apIter[pSet->nIter-1] : 0; + Fts5Iter *pNew = 0; + Fts5SegIter *pNewIter = 0; + Fts5SegIter *pPrevIter = 0; + + int iLvl, iSeg, ii; + + pNew = fts5MultiIterAlloc(p, pStruct->nSegment); + if( pSmall ){ + fts5BufferSet(&p->rc, &bSeek, pSmall->n, pSmall->p); + fts5BufferAppendBlob(&p->rc, &bSeek, 1, (const u8*)"\0"); + }else{ + fts5BufferSet(&p->rc, &bSeek, nToken, pToken); + } + if( p->rc ){ + sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + break; + } + + pNewIter = &pNew->aSeg[0]; + pPrevIter = (pPrev ? &pPrev->aSeg[0] : 0); + for(iLvl=0; iLvlnLevel; iLvl++){ + for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){ + Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg]; + int bDone = 0; + + if( pPrevIter ){ + if( fts5BufferCompare(pSmall, &pPrevIter->term) ){ + memcpy(pNewIter, pPrevIter, sizeof(Fts5SegIter)); + memset(pPrevIter, 0, sizeof(Fts5SegIter)); + bDone = 1; + }else if( pPrevIter->iEndofDoclist>pPrevIter->pLeaf->szLeaf ){ + fts5SegIterNextInit(p,(const char*)bSeek.p,bSeek.n-1,pSeg,pNewIter); + bDone = 1; + } + } + + if( bDone==0 ){ + fts5SegIterSeekInit(p, bSeek.p, bSeek.n, flags, pSeg, pNewIter); + } + + if( pPrevIter ){ + if( pPrevIter->pTombArray ){ + pNewIter->pTombArray = pPrevIter->pTombArray; + pNewIter->pTombArray->nRef++; + } + }else{ + fts5SegIterAllocTombstone(p, pNewIter); + } + + pNewIter++; + if( pPrevIter ) pPrevIter++; + if( p->rc ) break; + } + } + fts5TokendataSetTermIfEof(pPrev, pSmall); + + pNew->bSkipEmpty = 1; + pNew->pColset = pColset; + fts5IterSetOutputCb(&p->rc, pNew); + + /* Loop through all segments in the new iterator. Find the smallest + ** term that any segment-iterator points to. Iterator pNew will be + ** used for this term. Also, set any iterator that points to a term that + ** does not match pToken/nToken to point to EOF */ + pSmall = 0; + for(ii=0; iinSeg; ii++){ + Fts5SegIter *pII = &pNew->aSeg[ii]; + if( 0==fts5IsTokendataPrefix(&pII->term, pToken, nToken) ){ + fts5SegIterSetEOF(pII); + } + if( pII->pLeaf && (!pSmall || fts5BufferCompare(pSmall, &pII->term)>0) ){ + pSmall = &pII->term; + } + } + + /* If pSmall is still NULL at this point, then the new iterator does + ** not point to any terms that match the query. So delete it and break + ** out of the loop - all required iterators have been collected. */ + if( pSmall==0 ){ + sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + break; + } + + /* Append this iterator to the set and continue. 
*/ + pSet = fts5AppendTokendataIter(p, pSet, pNew); + } + + if( p->rc==SQLITE_OK && pSet ){ + int ii; + for(ii=0; iinIter; ii++){ + Fts5Iter *pIter = pSet->apIter[ii]; + int iSeg; + for(iSeg=0; iSegnSeg; iSeg++){ + pIter->aSeg[iSeg].flags |= FTS5_SEGITER_ONETERM; + } + fts5MultiIterFinishSetup(p, pIter); + } + } + + if( p->rc==SQLITE_OK ){ + pRet = fts5MultiIterAlloc(p, 0); + } + if( pRet ){ + pRet->pTokenDataIter = pSet; + if( pSet ){ + fts5IterSetOutputsTokendata(pRet); + }else{ + pRet->base.bEof = 1; + } + }else{ + fts5TokendataIterDelete(pSet); + } + + fts5StructureRelease(pStruct); + fts5BufferFree(&bSeek); + return pRet; +} + + /* ** Open a new iterator to iterate though all rowid that match the ** specified token or token prefix. @@ -242715,8 +245570,13 @@ static int sqlite3Fts5IndexQuery( if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){ int iIdx = 0; /* Index to search */ int iPrefixIdx = 0; /* +1 prefix index */ + int bTokendata = pConfig->bTokendata; if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken); + if( flags & (FTS5INDEX_QUERY_NOTOKENDATA|FTS5INDEX_QUERY_SCAN) ){ + bTokendata = 0; + } + /* Figure out which index to search and set iIdx accordingly. If this ** is a prefix query for which there is no prefix index, set iIdx to ** greater than pConfig->nPrefix to indicate that the query will be @@ -242742,7 +245602,10 @@ static int sqlite3Fts5IndexQuery( } } - if( iIdx<=pConfig->nPrefix ){ + if( bTokendata && iIdx==0 ){ + buf.p[0] = '0'; + pRet = fts5SetupTokendataIter(p, buf.p, nToken+1, pColset); + }else if( iIdx<=pConfig->nPrefix ){ /* Straight index lookup */ Fts5Structure *pStruct = fts5StructureRead(p); buf.p[0] = (u8)(FTS5_MAIN_PREFIX + iIdx); @@ -242789,7 +245652,11 @@ static int sqlite3Fts5IndexQuery( static int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; assert( pIter->pIndex->rc==SQLITE_OK ); - fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); + if( pIter->pTokenDataIter ){ + fts5TokendataIterNext(pIter, 0, 0); + }else{ + fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); + } return fts5IndexReturn(pIter->pIndex); } @@ -242822,7 +245689,11 @@ static int sqlite3Fts5IterNextScan(Fts5IndexIter *pIndexIter){ */ static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); + if( pIter->pTokenDataIter ){ + fts5TokendataIterNext(pIter, 1, iMatch); + }else{ + fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); + } return fts5IndexReturn(pIter->pIndex); } @@ -242837,6 +245708,99 @@ static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){ return (z ? &z[1] : 0); } +/* +** This is used by xInstToken() to access the token at offset iOff, column +** iCol of row iRowid. The token is returned via output variables *ppOut +** and *pnOut. The iterator passed as the first argument must be a tokendata=1 +** iterator (pIter->pTokenDataIter!=0). 
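The lookup described in this comment is a binary search over Fts5TokenDataMap entries ordered by (iRowid, iPos). The same search in isolation, over a struct mirroring the one defined earlier in this diff (the standalone names are illustrative):

#include <stdint.h>

typedef struct Map { int64_t iRowid; int64_t iPos; int iIter; } Map;

/* Return the index of the entry matching (iRowid, iPos) exactly, or -1.
** aMap[] must be sorted by rowid, then by position - the order in which
** fts5TokendataIterAppendMap() accumulates entries. */
static int mapSearch(const Map *aMap, int nMap, int64_t iRowid, int64_t iPos){
  int i1 = 0, i2 = nMap;
  while( i2>i1 ){
    int iTest = (i1+i2)/2;
    if( aMap[iTest].iRowid<iRowid ){
      i1 = iTest+1;
    }else if( aMap[iTest].iRowid>iRowid ){
      i2 = iTest;
    }else if( aMap[iTest].iPos<iPos ){
      i1 = iTest+1;
    }else if( aMap[iTest].iPos>iPos ){
      i2 = iTest;
    }else{
      return iTest;
    }
  }
  return -1;
}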
+*/ +static int sqlite3Fts5IterToken( + Fts5IndexIter *pIndexIter, + i64 iRowid, + int iCol, + int iOff, + const char **ppOut, int *pnOut +){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + Fts5TokenDataMap *aMap = pT->aMap; + i64 iPos = (((i64)iCol)<<32) + iOff; + + int i1 = 0; + int i2 = pT->nMap; + int iTest = 0; + + while( i2>i1 ){ + iTest = (i1 + i2) / 2; + + if( aMap[iTest].iRowidiRowid ){ + i2 = iTest; + }else{ + if( aMap[iTest].iPosiPos ){ + i2 = iTest; + }else{ + break; + } + } + } + + if( i2>i1 ){ + Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter]; + *ppOut = (const char*)pMap->aSeg[0].term.p+1; + *pnOut = pMap->aSeg[0].term.n-1; + } + + return SQLITE_OK; +} + +/* +** Clear any existing entries from the token-map associated with the +** iterator passed as the only argument. +*/ +static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter *pIndexIter){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + if( pIter && pIter->pTokenDataIter ){ + pIter->pTokenDataIter->nMap = 0; + } +} + +/* +** Set a token-mapping for the iterator passed as the first argument. This +** is used in detail=column or detail=none mode when a token is requested +** using the xInstToken() API. In this case the caller tokenizers the +** current row and configures the token-mapping via multiple calls to this +** function. +*/ +static int sqlite3Fts5IndexIterWriteTokendata( + Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, + i64 iRowid, int iCol, int iOff +){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + Fts5Index *p = pIter->pIndex; + int ii; + + assert( p->pConfig->eDetail!=FTS5_DETAIL_FULL ); + assert( pIter->pTokenDataIter ); + + for(ii=0; iinIter; ii++){ + Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term; + if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break; + } + if( iinIter ){ + fts5TokendataIterAppendMap(p, pT, ii, iRowid, (((i64)iCol)<<32) + iOff); + } + return fts5IndexReturn(p); +} + /* ** Close an iterator opened by an earlier call to sqlite3Fts5IndexQuery(). 
*/ @@ -242844,6 +245808,7 @@ static void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){ if( pIndexIter ){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; Fts5Index *pIndex = pIter->pIndex; + fts5TokendataIterDelete(pIter->pTokenDataIter); fts5MultiIterFree(pIter); sqlite3Fts5IndexCloseReader(pIndex); } @@ -243351,7 +246316,9 @@ static int fts5QueryCksum( int eDetail = p->pConfig->eDetail; u64 cksum = *pCksum; Fts5IndexIter *pIter = 0; - int rc = sqlite3Fts5IndexQuery(p, z, n, flags, 0, &pIter); + int rc = sqlite3Fts5IndexQuery( + p, z, n, (flags | FTS5INDEX_QUERY_NOTOKENDATA), 0, &pIter + ); while( rc==SQLITE_OK && ALWAYS(pIter!=0) && 0==sqlite3Fts5IterEof(pIter) ){ i64 rowid = pIter->iRowid; @@ -243518,7 +246485,7 @@ static void fts5IndexIntegrityCheckEmpty( } static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ - int iTermOff = 0; + i64 iTermOff = 0; int ii; Fts5Buffer buf1 = {0,0,0}; @@ -243527,7 +246494,7 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ ii = pLeaf->szLeaf; while( iinn && p->rc==SQLITE_OK ){ int res; - int iOff; + i64 iOff; int nIncr; ii += fts5GetVarint32(&pLeaf->p[ii], nIncr); @@ -244049,6 +247016,24 @@ static void fts5DecodeRowidList( } #endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) +static void fts5BufferAppendTerm(int *pRc, Fts5Buffer *pBuf, Fts5Buffer *pTerm){ + int ii; + fts5BufferGrow(pRc, pBuf, pTerm->n*2 + 1); + if( *pRc==SQLITE_OK ){ + for(ii=0; iin; ii++){ + if( pTerm->p[ii]==0x00 ){ + pBuf->p[pBuf->n++] = '\\'; + pBuf->p[pBuf->n++] = '0'; + }else{ + pBuf->p[pBuf->n++] = pTerm->p[ii]; + } + } + pBuf->p[pBuf->n] = 0x00; + } +} +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ + #if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** The implementation of user-defined scalar function fts5_decode(). @@ -244156,9 +247141,8 @@ static void fts5DecodeFunction( iOff += fts5GetVarint32(&a[iOff], nAppend); term.n = nKeep; fts5BufferAppendBlob(&rc, &term, nAppend, &a[iOff]); - sqlite3Fts5BufferAppendPrintf( - &rc, &s, " term=%.*s", term.n, (const char*)term.p - ); + sqlite3Fts5BufferAppendPrintf(&rc, &s, " term="); + fts5BufferAppendTerm(&rc, &s, &term); iOff += nAppend; /* Figure out where the doclist for this term ends */ @@ -244266,9 +247250,8 @@ static void fts5DecodeFunction( fts5BufferAppendBlob(&rc, &term, nByte, &a[iOff]); iOff += nByte; - sqlite3Fts5BufferAppendPrintf( - &rc, &s, " term=%.*s", term.n, (const char*)term.p - ); + sqlite3Fts5BufferAppendPrintf(&rc, &s, " term="); + fts5BufferAppendTerm(&rc, &s, &term); iOff += fts5DecodeDoclist(&rc, &s, &a[iOff], iEnd-iOff); } @@ -244743,7 +247726,7 @@ struct Fts5FullTable { Fts5Global *pGlobal; /* Global (connection wide) data */ Fts5Cursor *pSortCsr; /* Sort data from this cursor */ int iSavepoint; /* Successful xSavepoint()+1 */ - int bInSavepoint; + #ifdef SQLITE_DEBUG struct Fts5TransactionState ts; #endif @@ -245281,12 +248264,15 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ } idxStr[iIdxStr] = '\0'; - /* Set idxFlags flags for the ORDER BY clause */ + /* Set idxFlags flags for the ORDER BY clause + ** + ** Note that tokendata=1 tables cannot currently handle "ORDER BY rowid DESC". 
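The xBestIndex change just below stops advertising rowid order to SQLite for descending scans of tokendata=1 tables, so such queries remain correct but SQLite performs the sort itself. In SQL terms (table name illustrative):

/* On a tokendata=1 table, only the ascending form is consumed in index
** order by the FTS5 cursor; the descending form gets an explicit sort
** step in the query plan. */
static const char *azOrderByExamples[] = {
  "SELECT rowid FROM ft WHERE ft MATCH 'abc' ORDER BY rowid ASC",
  "SELECT rowid FROM ft WHERE ft MATCH 'abc' ORDER BY rowid DESC"
};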
+ */ if( pInfo->nOrderBy==1 ){ int iSort = pInfo->aOrderBy[0].iColumn; if( iSort==(pConfig->nCol+1) && bSeenMatch ){ idxFlags |= FTS5_BI_ORDER_RANK; - }else if( iSort==-1 ){ + }else if( iSort==-1 && (!pInfo->aOrderBy[0].desc || !pConfig->bTokendata) ){ idxFlags |= FTS5_BI_ORDER_ROWID; } if( BitFlagTest(idxFlags, FTS5_BI_ORDER_RANK|FTS5_BI_ORDER_ROWID) ){ @@ -245538,6 +248524,16 @@ static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){ ); assert( !CsrFlagTest(pCsr, FTS5CSR_EOF) ); + /* If this cursor uses FTS5_PLAN_MATCH and this is a tokendata=1 table, + ** clear any token mappings accumulated at the fts5_index.c level. In + ** other cases, specifically FTS5_PLAN_SOURCE and FTS5_PLAN_SORTED_MATCH, + ** we need to retain the mappings for the entire query. */ + if( pCsr->ePlan==FTS5_PLAN_MATCH + && ((Fts5Table*)pCursor->pVtab)->pConfig->bTokendata + ){ + sqlite3Fts5ExprClearTokens(pCsr->pExpr); + } + if( pCsr->ePlan<3 ){ int bSkip = 0; if( (rc = fts5CursorReseek(pCsr, &bSkip)) || bSkip ) return rc; @@ -246198,7 +249194,10 @@ static int fts5SpecialInsert( }else if( 0==sqlite3_stricmp("flush", zCmd) ){ rc = sqlite3Fts5FlushToDisk(&pTab->p); }else{ - rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + rc = sqlite3Fts5FlushToDisk(&pTab->p); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + } if( rc==SQLITE_OK ){ rc = sqlite3Fts5ConfigSetValue(pTab->p.pConfig, zCmd, pVal, &bError); } @@ -246523,7 +249522,10 @@ static int fts5ApiColumnText( ){ int rc = SQLITE_OK; Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; - if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab)) + Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); + if( iCol<0 || iCol>=pTab->pConfig->nCol ){ + rc = SQLITE_RANGE; + }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab)) || pCsr->ePlan==FTS5_PLAN_SPECIAL ){ *pz = 0; @@ -246548,8 +249550,9 @@ static int fts5CsrPoslist( int rc = SQLITE_OK; int bLive = (pCsr->pSorter==0); - if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){ - + if( iPhrase<0 || iPhrase>=sqlite3Fts5ExprPhraseCount(pCsr->pExpr) ){ + rc = SQLITE_RANGE; + }else if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){ if( pConfig->eDetail!=FTS5_DETAIL_FULL ){ Fts5PoslistPopulator *aPopulator; int i; @@ -246573,15 +249576,21 @@ static int fts5CsrPoslist( CsrFlagClear(pCsr, FTS5CSR_REQUIRE_POSLIST); } - if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){ - Fts5Sorter *pSorter = pCsr->pSorter; - int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]); - *pn = pSorter->aIdx[iPhrase] - i1; - *pa = &pSorter->aPoslist[i1]; + if( rc==SQLITE_OK ){ + if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){ + Fts5Sorter *pSorter = pCsr->pSorter; + int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]); + *pn = pSorter->aIdx[iPhrase] - i1; + *pa = &pSorter->aPoslist[i1]; + }else{ + *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa); + } }else{ - *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa); + *pa = 0; + *pn = 0; } + return rc; } @@ -246688,12 +249697,6 @@ static int fts5ApiInst( ){ if( iIdx<0 || iIdx>=pCsr->nInstCount ){ rc = SQLITE_RANGE; -#if 0 - }else if( fts5IsOffsetless((Fts5Table*)pCsr->base.pVtab) ){ - *piPhrase = pCsr->aInst[iIdx*3]; - *piCol = pCsr->aInst[iIdx*3 + 2]; - *piOff = -1; -#endif }else{ *piPhrase = pCsr->aInst[iIdx*3]; *piCol = pCsr->aInst[iIdx*3 + 1]; @@ -246948,13 +249951,56 @@ static int fts5ApiPhraseFirstColumn( return rc; } +/* +** xQueryToken() API implemenetation. 
+*/ +static int fts5ApiQueryToken( + Fts5Context* pCtx, + int iPhrase, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + return sqlite3Fts5ExprQueryToken(pCsr->pExpr, iPhrase, iToken, ppOut, pnOut); +} + +/* +** xInstToken() API implemenetation. +*/ +static int fts5ApiInstToken( + Fts5Context *pCtx, + int iIdx, + int iToken, + const char **ppOut, int *pnOut +){ + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + int rc = SQLITE_OK; + if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0 + || SQLITE_OK==(rc = fts5CacheInstArray(pCsr)) + ){ + if( iIdx<0 || iIdx>=pCsr->nInstCount ){ + rc = SQLITE_RANGE; + }else{ + int iPhrase = pCsr->aInst[iIdx*3]; + int iCol = pCsr->aInst[iIdx*3 + 1]; + int iOff = pCsr->aInst[iIdx*3 + 2]; + i64 iRowid = fts5CursorRowid(pCsr); + rc = sqlite3Fts5ExprInstToken( + pCsr->pExpr, iRowid, iPhrase, iCol, iOff, iToken, ppOut, pnOut + ); + } + } + return rc; +} + static int fts5ApiQueryPhrase(Fts5Context*, int, void*, int(*)(const Fts5ExtensionApi*, Fts5Context*, void*) ); static const Fts5ExtensionApi sFts5Api = { - 2, /* iVersion */ + 3, /* iVersion */ fts5ApiUserData, fts5ApiColumnCount, fts5ApiRowCount, @@ -246974,6 +250020,8 @@ static const Fts5ExtensionApi sFts5Api = { fts5ApiPhraseNext, fts5ApiPhraseFirstColumn, fts5ApiPhraseNextColumn, + fts5ApiQueryToken, + fts5ApiInstToken }; /* @@ -247240,9 +250288,7 @@ static int fts5RenameMethod( ){ int rc; Fts5FullTable *pTab = (Fts5FullTable*)pVtab; - pTab->bInSavepoint = 1; rc = sqlite3Fts5StorageRename(pTab->pStorage, zName); - pTab->bInSavepoint = 0; return rc; } @@ -247259,26 +250305,12 @@ static int sqlite3Fts5FlushToDisk(Fts5Table *pTab){ static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; int rc = SQLITE_OK; - char *zSql = 0; - fts5CheckTransactionState(pTab, FTS5_SAVEPOINT, iSavepoint); - if( pTab->bInSavepoint==0 ){ - zSql = sqlite3_mprintf("INSERT INTO %Q.%Q(%Q) VALUES('flush')", - pTab->p.pConfig->zDb, pTab->p.pConfig->zName, pTab->p.pConfig->zName - ); - if( zSql ){ - pTab->bInSavepoint = 1; - rc = sqlite3_exec(pTab->p.pConfig->db, zSql, 0, 0, 0); - pTab->bInSavepoint = 0; - sqlite3_free(zSql); - }else{ - rc = SQLITE_NOMEM; - } - if( rc==SQLITE_OK ){ - pTab->iSavepoint = iSavepoint+1; - } + fts5CheckTransactionState(pTab, FTS5_SAVEPOINT, iSavepoint); + rc = sqlite3Fts5FlushToDisk((Fts5Table*)pVtab); + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint+1; } - return rc; } @@ -247310,8 +250342,8 @@ static int fts5RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ int rc = SQLITE_OK; fts5CheckTransactionState(pTab, FTS5_ROLLBACKTO, iSavepoint); fts5TripCursors(pTab); - pTab->p.pConfig->pgsz = 0; if( (iSavepoint+1)<=pTab->iSavepoint ){ + pTab->p.pConfig->pgsz = 0; rc = sqlite3Fts5StorageRollback(pTab->pStorage); } return rc; @@ -247516,7 +250548,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2023-11-01 11:23:50 17129ba1ff7f0daf37100ee82d507aef7827cf38de1866e2633096ae6ad81301", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a", -1, SQLITE_TRANSIENT); } /* @@ -247539,7 +250571,7 @@ static int fts5ShadowName(const char *zName){ ** if anything is found amiss. Return a NULL pointer if everything is ** OK. 
*/ -static int fts5Integrity( +static int fts5IntegrityMethod( sqlite3_vtab *pVtab, /* the FTS5 virtual table to check */ const char *zSchema, /* Name of schema in which this table lives */ const char *zTabname, /* Name of the table itself */ @@ -247547,27 +250579,21 @@ char **pzErr /* Write error message here */ ){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; - Fts5Config *pConfig = pTab->p.pConfig; - char *zSql; - char *zErr = 0; int rc; + assert( pzErr!=0 && *pzErr==0 ); UNUSED_PARAM(isQuick); - zSql = sqlite3_mprintf( - "INSERT INTO \"%w\".\"%w\"(\"%w\") VALUES('integrity-check');", - zSchema, zTabname, pConfig->zName); - if( zSql==0 ) return SQLITE_NOMEM; - rc = sqlite3_exec(pConfig->db, zSql, 0, 0, &zErr); - sqlite3_free(zSql); + rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, 0); if( (rc&0xff)==SQLITE_CORRUPT ){ *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", zSchema, zTabname); }else if( rc!=SQLITE_OK ){ *pzErr = sqlite3_mprintf("unable to validate the inverted index for" " FTS5 table %s.%s: %s", - zSchema, zTabname, zErr); + zSchema, zTabname, sqlite3_errstr(rc)); } - sqlite3_free(zErr); + sqlite3Fts5IndexCloseReader(pTab->p.pIndex); + return SQLITE_OK; } @@ -247597,7 +250623,7 @@ static int fts5Init(sqlite3 *db){ /* xRelease */ fts5ReleaseMethod, /* xRollbackTo */ fts5RollbackToMethod, /* xShadowName */ fts5ShadowName, - /* xIntegrity */ fts5Integrity + /* xIntegrity */ fts5IntegrityMethod }; int rc; @@ -248363,7 +251389,7 @@ static int sqlite3Fts5StorageRebuild(Fts5Storage *p){ } if( rc==SQLITE_OK ){ - rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0); + rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, pConfig->pzErrmsg); } while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pScan) ){ @@ -249150,6 +252176,12 @@ static const unsigned char sqlite3Utf8Trans1[] = { #endif /* ifndef SQLITE_AMALGAMATION */ +#define FTS5_SKIP_UTF8(zIn) { \ + if( ((unsigned char)(*(zIn++)))>=0xc0 ){ \ + while( (((unsigned char)*zIn) & 0xc0)==0x80 ){ zIn++; } \ + } \ +} + typedef struct Unicode61Tokenizer Unicode61Tokenizer; struct Unicode61Tokenizer { unsigned char aTokenChar[128]; /* ASCII range token characters */ @@ -250185,6 +253217,7 @@ static int fts5PorterTokenize( typedef struct TrigramTokenizer TrigramTokenizer; struct TrigramTokenizer { int bFold; /* True to fold to lower-case */ + int iFoldParam; /* Parameter to pass to Fts5UnicodeFold() */ }; /* @@ -250211,6 +253244,7 @@ static int fts5TriCreate( }else{ int i; pNew->bFold = 1; + pNew->iFoldParam = 0; for(i=0; rc==SQLITE_OK && i<nArg; i+=2){ const char *zArg = azArg[i+1]; if( 0==sqlite3_stricmp(azArg[i], "case_sensitive") ){ if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1] ){ rc = SQLITE_ERROR; }else{ pNew->bFold = (zArg[0]=='0'); } + }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){ + if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){ + rc = SQLITE_ERROR; + }else{ + pNew->iFoldParam = (zArg[0]!='0') ?
2 : 0; + } }else{ rc = SQLITE_ERROR; } } + + if( pNew->iFoldParam!=0 && pNew->bFold==0 ){ + rc = SQLITE_ERROR; + } + if( rc!=SQLITE_OK ){ fts5TriDelete((Fts5Tokenizer*)pNew); pNew = 0; @@ -250245,40 +253290,62 @@ static int fts5TriTokenize( TrigramTokenizer *p = (TrigramTokenizer*)pTok; int rc = SQLITE_OK; char aBuf[32]; + char *zOut = aBuf; + int ii; const unsigned char *zIn = (const unsigned char*)pText; const unsigned char *zEof = &zIn[nText]; u32 iCode; + int aStart[3]; /* Input offset of each character in aBuf[] */ UNUSED_PARAM(unusedFlags); - while( 1 ){ - char *zOut = aBuf; - int iStart = zIn - (const unsigned char*)pText; - const unsigned char *zNext; - - READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; - zNext = zIn; - if( zIn<zEof ){ - if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); + + /* Populate aBuf[] with the characters for the first trigram. */ + for(ii=0; ii<3; ii++){ + do { + aStart[ii] = zIn - (const unsigned char*)pText; READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; - }else{ - break; - } - if( zIn<zEof ){ - if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); + if( iCode==0 ) return SQLITE_OK; + if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); +}while( iCode==0 ); + WRITE_UTF8(zOut, iCode); + } + + /* At the start of each iteration of this loop: + ** + ** aBuf: Contains 3 characters. The 3 characters of the next trigram. + ** zOut: Points to the byte following the last character in aBuf. + ** aStart[3]: Contains the byte offset in the input text corresponding + ** to the start of each of the three characters in the buffer. + */ + assert( zIn<=zEof ); + while( 1 ){ + int iNext; /* Start of character following current tri */ + const char *z1; + + /* Read characters from the input up until the first non-diacritic */ + do { + iNext = zIn - (const unsigned char*)pText; READ_UTF8(zIn, zEof, iCode); if( iCode==0 ) break; - if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); - }else{ - break; - } - rc = xToken(pCtx, 0, aBuf, zOut-aBuf, iStart, iStart + zOut-aBuf); - if( rc!=SQLITE_OK ) break; - zIn = zNext; + if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); +}while( iCode==0 ); + + /* Pass the current trigram back to fts5 */ + rc = xToken(pCtx, 0, aBuf, zOut-aBuf, aStart[0], iNext); + if( iCode==0 || rc!=SQLITE_OK ) break; + + /* Remove the first character from buffer aBuf[]. Append the character + ** with codepoint iCode. */ + z1 = aBuf; + FTS5_SKIP_UTF8(z1); + memmove(aBuf, z1, zOut - z1); + zOut -= (z1 - aBuf); + WRITE_UTF8(zOut, iCode); + + /* Update the aStart[] array */ + aStart[0] = aStart[1]; + aStart[1] = aStart[2]; + aStart[2] = iNext; } return rc; @@ -250301,7 +253368,9 @@ static int sqlite3Fts5TokenizerPattern( ){ if( xCreate==fts5TriCreate ){ TrigramTokenizer *p = (TrigramTokenizer*)pTok; - return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB; + if( p->iFoldParam==0 ){ + return p->bFold ?
FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB; + } } return FTS5_PATTERN_NONE; } @@ -252090,7 +255159,7 @@ static int fts5VocabFilterMethod( if( pEq ){ zTerm = (const char *)sqlite3_value_text(pEq); nTerm = sqlite3_value_bytes(pEq); - f = 0; + f = FTS5INDEX_QUERY_NOTOKENDATA; }else{ if( pGe ){ zTerm = (const char *)sqlite3_value_text(pGe); diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index 4eff82d9d..a07a51952 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -147,9 +147,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.44.0" -#define SQLITE_VERSION_NUMBER 3044000 -#define SQLITE_SOURCE_ID "2023-11-01 11:23:50 17129ba1ff7f0daf37100ee82d507aef7827cf38de1866e2633096ae6ad81301" +#define SQLITE_VERSION "3.45.1" +#define SQLITE_VERSION_NUMBER 3045001 +#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -3955,15 +3955,17 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename); ** ** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language -** text that describes the error, as either UTF-8 or UTF-16 respectively. +** text that describes the error, as either UTF-8 or UTF-16 respectively, +** or NULL if no error message is available. ** (See how SQLite handles [invalid UTF] for exceptions to this rule.) ** ^(Memory to hold the error message string is managed internally. ** The application does not need to worry about freeing the result. ** However, the error string might be overwritten or deallocated by ** subsequent calls to other SQLite interface functions.)^ ** -** ^The sqlite3_errstr() interface returns the English-language text -** that describes the [result code], as UTF-8. +** ^The sqlite3_errstr(E) interface returns the English-language text +** that describes the [result code] E, as UTF-8, or NULL if E is not an +** result code for which a text error message is available. ** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. ** @@ -5574,13 +5576,27 @@ SQLITE_API int sqlite3_create_window_function( ** ** ** [[SQLITE_SUBTYPE]]
    SQLITE_SUBTYPE
    -** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call +** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call ** [sqlite3_value_subtype()] to inspect the sub-types of its arguments. -** Specifying this flag makes no difference for scalar or aggregate user -** functions. However, if it is not specified for a user-defined window -** function, then any sub-types belonging to arguments passed to the window -** function may be discarded before the window function is called (i.e. -** sqlite3_value_subtype() will always return 0). +** This flag instructs SQLite to omit some corner-case optimizations that +** might disrupt the operation of the [sqlite3_value_subtype()] function, +** causing it to return zero rather than the correct subtype(). +** SQL functions that invokes [sqlite3_value_subtype()] should have this +** property. If the SQLITE_SUBTYPE property is omitted, then the return +** value from [sqlite3_value_subtype()] might sometimes be zero even though +** a non-zero subtype was specified by the function argument expression. +** +** [[SQLITE_RESULT_SUBTYPE]]
    SQLITE_RESULT_SUBTYPE
    +** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call +** [sqlite3_result_subtype()] to cause a sub-type to be associated with its +** result. +** Every function that invokes [sqlite3_result_subtype()] should have this +** property. If it does not, then the call to [sqlite3_result_subtype()] +** might become a no-op if the function is used as term in an +** [expression index]. On the other hand, SQL functions that never invoke +** [sqlite3_result_subtype()] should avoid setting this property, as the +** purpose of this property is to disable certain optimizations that are +** incompatible with subtypes. **
    ** */ @@ -5588,6 +5604,7 @@ SQLITE_API int sqlite3_create_window_function( #define SQLITE_DIRECTONLY 0x000080000 #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 +#define SQLITE_RESULT_SUBTYPE 0x001000000 /* ** CAPI3REF: Deprecated Functions @@ -5784,6 +5801,12 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); ** information can be used to pass a limited amount of context from ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. +** +** Every [application-defined SQL function] that invoke this interface +** should include the [SQLITE_SUBTYPE] property in the text +** encoding argument when the function is [sqlite3_create_function|registered]. +** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() +** might return zero instead of the upstream subtype in some corner cases. */ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); @@ -5914,14 +5937,22 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); **
  • ^(when sqlite3_set_auxdata() is invoked again on the same ** parameter)^, or **
  • ^(during the original sqlite3_set_auxdata() call when a memory -** allocation error occurs.)^ +** allocation error occurs.)^ +**
  • ^(during the original sqlite3_set_auxdata() call if the function +** is evaluated during query planning instead of during query execution, +** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ ** -** Note the last bullet in particular. The destructor X in +** Note the last two bullets in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the ** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() ** should be called near the end of the function implementation and the ** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. +** sqlite3_set_auxdata() has been called. Furthermore, a call to +** sqlite3_get_auxdata() that occurs immediately after a corresponding call +** to sqlite3_set_auxdata() might still return NULL if an out-of-memory +** condition occurred during the sqlite3_set_auxdata() call or if the +** function is being evaluated during query planning rather than during +** query execution. ** ** ^(In practice, auxiliary data is preserved between function calls for ** function parameters that are compile-time constants, including literal @@ -6195,6 +6226,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n); ** higher order bits are discarded. ** The number of subtype bytes preserved by SQLite might increase ** in future releases of SQLite. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_RESULT_SUBTYPE] property in its +** text encoding argument when the SQL function is +** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE] +** property is omitted from the function that invokes sqlite3_result_subtype(), +** then in some cases the sqlite3_result_subtype() might fail to set +** the result subtype. +** +** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any +** SQL function that invokes the sqlite3_result_subtype() interface +** and that does not have the SQLITE_RESULT_SUBTYPE property will raise +** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1 +** by default. */ SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int); @@ -7995,9 +8040,11 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** ** ^(Some systems (for example, Windows 95) do not support the operation ** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() -** will always return SQLITE_BUSY. The SQLite core only ever uses -** sqlite3_mutex_try() as an optimization so this is acceptable -** behavior.)^ +** will always return SQLITE_BUSY. In most cases the SQLite core only uses +** sqlite3_mutex_try() as an optimization, so this is acceptable +** behavior. The exceptions are unix builds that set the +** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working +** sqlite3_mutex_try() is required.)^ ** ** ^The sqlite3_mutex_leave() routine exits a mutex that was ** previously entered by the same thread. The behavior @@ -8256,6 +8303,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_ASSERT 12 #define SQLITE_TESTCTRL_ALWAYS 13 #define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */ +#define SQLITE_TESTCTRL_JSON_SELFCHECK 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ @@ -12769,8 +12817,11 @@ struct Fts5PhraseIter { ** created with the "columnsize=0" option. 
** ** xColumnText: -** This function attempts to retrieve the text of column iCol of the -** current document. If successful, (*pz) is set to point to a buffer +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer ** containing the text in utf-8 encoding, (*pn) is set to the size in bytes ** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, ** if an error occurs, an SQLite error code is returned and the final values @@ -12780,8 +12831,10 @@ struct Fts5PhraseIter { ** Returns the number of phrases in the current query expression. ** ** xPhraseSize: -** Returns the number of tokens in phrase iPhrase of the query. Phrases -** are numbered starting from zero. +** If parameter iCol is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. ** ** xInstCount: ** Set *pnInst to the total number of occurrences of all phrases within @@ -12797,12 +12850,13 @@ struct Fts5PhraseIter { ** Query for the details of phrase match iIdx within the current row. ** Phrase matches are numbered starting from zero, so the iIdx argument ** should be greater than or equal to zero and smaller than the value -** output by xInstCount(). +** output by xInstCount(). If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. ** -** Usually, output parameter *piPhrase is set to the phrase number, *piCol +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol ** to the column in which it occurs and *piOff the token offset of the -** first token of the phrase. Returns SQLITE_OK if successful, or an error -** code (i.e. SQLITE_NOMEM) if an error occurs. +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. ** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. @@ -12828,6 +12882,10 @@ struct Fts5PhraseIter { ** Invoking Api.xUserData() returns a copy of the pointer passed as ** the third argument to pUserData. ** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. ** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK. @@ -12942,9 +13000,42 @@ struct Fts5PhraseIter { ** ** xPhraseNextColumn() ** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. 
+** +** If iPhrase or iToken are less than zero, or if iPhrase is greater than +** or equal to the number of phrases in the query as reported by +** xPhraseCount(), or if iToken is equal to or greater than the number of +** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken + are both zeroed. +** +** The output text is not a copy of the query text that specified the +** token. It is the output of the tokenizer module. For tokendata=1 +** tables, this includes any embedded 0x00 and trailing data. +** +** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase hit iIdx within the +** current row. If iIdx is less than zero or greater than or equal to the +** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, +** output variable (*ppToken) is set to point to a buffer containing the +** matching document token, and (*pnToken) to the size of that buffer in +** bytes. This API is not available if the specified token matches a +** prefix query term. In that case both output variables are always set +** to 0. +** +** The output text is not a copy of the document text that was tokenized. +** It is the output of the tokenizer module. For tokendata=1 tables, this +** includes any embedded 0x00 and trailing data. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ void *(*xUserData)(Fts5Context*); @@ -12979,6 +13070,13 @@ struct Fts5ExtensionApi { int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); + + /* Below this point are iVersion>=3 only */ + int (*xQueryToken)(Fts5Context*, + int iPhrase, int iToken, + const char **ppToken, int *pnToken + ); + int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); }; /* diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go index 5e4e2ff57..4b3b6cab5 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go @@ -21,7 +21,6 @@ package sqlite3 #cgo CFLAGS: -DSQLITE_DEFAULT_WAL_SYNCHRONOUS=1 #cgo CFLAGS: -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT #cgo CFLAGS: -Wno-deprecated-declarations -#cgo linux,!android CFLAGS: -DHAVE_PREAD64=1 -DHAVE_PWRITE64=1 #cgo openbsd CFLAGS: -I/usr/local/include #cgo openbsd LDFLAGS: -L/usr/local/lib #ifndef USE_LIBSQLITE3 @@ -48,6 +47,18 @@ package sqlite3 # define SQLITE_DETERMINISTIC 0 #endif +#if defined(HAVE_PREAD64) && defined(HAVE_PWRITE64) +# undef USE_PREAD +# undef USE_PWRITE +# define USE_PREAD64 1 +# define USE_PWRITE64 1 +#elif defined(HAVE_PREAD) && defined(HAVE_PWRITE) +# undef USE_PREAD +# undef USE_PWRITE +# define USE_PREAD64 1 +# define USE_PWRITE64 1 +#endif + static int _sqlite3_open_v2(const char *filename, sqlite3 **ppDb, int flags, const char *zVfs) { #ifdef SQLITE_OPEN_URI @@ -596,10 +607,9 @@ func (c *SQLiteConn) RegisterAuthorizer(callback func(int, string, string, strin // RegisterFunc makes a Go function available as a SQLite function. // // The Go function can have arguments of the following types: any -// numeric type except complex, bool, []byte, string and -// interface{}. 
interface{} arguments are given the direct translation -// of the SQLite data type: int64 for INTEGER, float64 for FLOAT, -// []byte for BLOB, string for TEXT. +// numeric type except complex, bool, []byte, string and any. +// any arguments are given the direct translation of the SQLite data type: +// int64 for INTEGER, float64 for FLOAT, []byte for BLOB, string for TEXT. // // The function can additionally be variadic, as long as the type of // the variadic argument is one of the above. @@ -609,7 +619,7 @@ func (c *SQLiteConn) RegisterAuthorizer(callback func(int, string, string, strin // optimizations in its queries. // // See _example/go_custom_funcs for a detailed example. -func (c *SQLiteConn) RegisterFunc(name string, impl interface{}, pure bool) error { +func (c *SQLiteConn) RegisterFunc(name string, impl any, pure bool) error { var fi functionInfo fi.f = reflect.ValueOf(impl) t := fi.f.Type() @@ -691,7 +701,7 @@ func sqlite3CreateFunction(db *C.sqlite3, zFunctionName *C.char, nArg C.int, eTe // return an error in addition to their other return values. // // See _example/go_custom_funcs for a detailed example. -func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool) error { +func (c *SQLiteConn) RegisterAggregator(name string, impl any, pure bool) error { var ai aggInfo ai.constructor = reflect.ValueOf(impl) t := ai.constructor.Type() @@ -865,14 +875,16 @@ func (c *SQLiteConn) exec(ctx context.Context, query string, args []driver.Named // consume the number of arguments used in the current // statement and append all named arguments not // contained therein - stmtArgs = append(stmtArgs, args[start:start+na]...) - for i := range args { - if (i < start || i >= na) && args[i].Name != "" { - stmtArgs = append(stmtArgs, args[i]) + if len(args[start:start+na]) > 0 { + stmtArgs = append(stmtArgs, args[start:start+na]...) + for i := range args { + if (i < start || i >= na) && args[i].Name != "" { + stmtArgs = append(stmtArgs, args[i]) + } + } + for i := range stmtArgs { + stmtArgs[i].Ordinal = i + 1 } - } - for i := range stmtArgs { - stmtArgs[i].Ordinal = i + 1 } res, err = s.(*SQLiteStmt).exec(ctx, stmtArgs) if err != nil && err != driver.ErrSkip { @@ -965,103 +977,104 @@ func (c *SQLiteConn) begin(ctx context.Context) (driver.Tx, error) { // The argument is may be either in parentheses or it may be separated from // the pragma name by an equal sign. The two syntaxes yield identical results. // In many pragmas, the argument is a boolean. The boolean can be one of: -// 1 yes true on -// 0 no false off +// +// 1 yes true on +// 0 no false off // // You can specify a DSN string using a URI as the filename. -// test.db -// file:test.db?cache=shared&mode=memory -// :memory: -// file::memory: // -// mode -// Access mode of the database. -// https://www.sqlite.org/c3ref/open.html -// Values: -// - ro -// - rw -// - rwc -// - memory +// test.db +// file:test.db?cache=shared&mode=memory +// :memory: +// file::memory: // -// cache -// SQLite Shared-Cache Mode -// https://www.sqlite.org/sharedcache.html -// Values: -// - shared -// - private +// mode +// Access mode of the database. +// https://www.sqlite.org/c3ref/open.html +// Values: +// - ro +// - rw +// - rwc +// - memory // -// immutable=Boolean -// The immutable parameter is a boolean query parameter that indicates -// that the database file is stored on read-only media. 
When immutable is set, -// SQLite assumes that the database file cannot be changed, -// even by a process with higher privilege, -// and so the database is opened read-only and all locking and change detection is disabled. -// Caution: Setting the immutable property on a database file that -// does in fact change can result in incorrect query results and/or SQLITE_CORRUPT errors. +// cache +// SQLite Shared-Cache Mode +// https://www.sqlite.org/sharedcache.html +// Values: +// - shared +// - private // -// go-sqlite3 adds the following query parameters to those used by SQLite: -// _loc=XXX -// Specify location of time format. It's possible to specify "auto". +// immutable=Boolean +// The immutable parameter is a boolean query parameter that indicates +// that the database file is stored on read-only media. When immutable is set, +// SQLite assumes that the database file cannot be changed, +// even by a process with higher privilege, +// and so the database is opened read-only and all locking and change detection is disabled. +// Caution: Setting the immutable property on a database file that +// does in fact change can result in incorrect query results and/or SQLITE_CORRUPT errors. // -// _mutex=XXX -// Specify mutex mode. XXX can be "no", "full". +// go-sqlite3 adds the following query parameters to those used by SQLite: // -// _txlock=XXX -// Specify locking behavior for transactions. XXX can be "immediate", -// "deferred", "exclusive". +// _loc=XXX +// Specify location of time format. It's possible to specify "auto". // -// _auto_vacuum=X | _vacuum=X -// 0 | none - Auto Vacuum disabled -// 1 | full - Auto Vacuum FULL -// 2 | incremental - Auto Vacuum Incremental +// _mutex=XXX +// Specify mutex mode. XXX can be "no", "full". // -// _busy_timeout=XXX"| _timeout=XXX -// Specify value for sqlite3_busy_timeout. +// _txlock=XXX +// Specify locking behavior for transactions. XXX can be "immediate", +// "deferred", "exclusive". // -// _case_sensitive_like=Boolean | _cslike=Boolean -// https://www.sqlite.org/pragma.html#pragma_case_sensitive_like -// Default or disabled the LIKE operation is case-insensitive. -// When enabling this options behaviour of LIKE will become case-sensitive. +// _auto_vacuum=X | _vacuum=X +// 0 | none - Auto Vacuum disabled +// 1 | full - Auto Vacuum FULL +// 2 | incremental - Auto Vacuum Incremental // -// _defer_foreign_keys=Boolean | _defer_fk=Boolean -// Defer Foreign Keys until outermost transaction is committed. +// _busy_timeout=XXX"| _timeout=XXX +// Specify value for sqlite3_busy_timeout. // -// _foreign_keys=Boolean | _fk=Boolean -// Enable or disable enforcement of foreign keys. +// _case_sensitive_like=Boolean | _cslike=Boolean +// https://www.sqlite.org/pragma.html#pragma_case_sensitive_like +// Default or disabled the LIKE operation is case-insensitive. +// When enabling this options behaviour of LIKE will become case-sensitive. // -// _ignore_check_constraints=Boolean -// This pragma enables or disables the enforcement of CHECK constraints. -// The default setting is off, meaning that CHECK constraints are enforced by default. +// _defer_foreign_keys=Boolean | _defer_fk=Boolean +// Defer Foreign Keys until outermost transaction is committed. // -// _journal_mode=MODE | _journal=MODE -// Set journal mode for the databases associated with the current connection. -// https://www.sqlite.org/pragma.html#pragma_journal_mode +// _foreign_keys=Boolean | _fk=Boolean +// Enable or disable enforcement of foreign keys. 
// -// _locking_mode=X | _locking=X -// Sets the database connection locking-mode. -// The locking-mode is either NORMAL or EXCLUSIVE. -// https://www.sqlite.org/pragma.html#pragma_locking_mode +// _ignore_check_constraints=Boolean +// This pragma enables or disables the enforcement of CHECK constraints. +// The default setting is off, meaning that CHECK constraints are enforced by default. // -// _query_only=Boolean -// The query_only pragma prevents all changes to database files when enabled. +// _journal_mode=MODE | _journal=MODE +// Set journal mode for the databases associated with the current connection. +// https://www.sqlite.org/pragma.html#pragma_journal_mode // -// _recursive_triggers=Boolean | _rt=Boolean -// Enable or disable recursive triggers. +// _locking_mode=X | _locking=X +// Sets the database connection locking-mode. +// The locking-mode is either NORMAL or EXCLUSIVE. +// https://www.sqlite.org/pragma.html#pragma_locking_mode // -// _secure_delete=Boolean|FAST -// When secure_delete is on, SQLite overwrites deleted content with zeros. -// https://www.sqlite.org/pragma.html#pragma_secure_delete +// _query_only=Boolean +// The query_only pragma prevents all changes to database files when enabled. // -// _synchronous=X | _sync=X -// Change the setting of the "synchronous" flag. -// https://www.sqlite.org/pragma.html#pragma_synchronous +// _recursive_triggers=Boolean | _rt=Boolean +// Enable or disable recursive triggers. // -// _writable_schema=Boolean -// When this pragma is on, the SQLITE_MASTER tables in which database -// can be changed using ordinary UPDATE, INSERT, and DELETE statements. -// Warning: misuse of this pragma can easily result in a corrupt database file. +// _secure_delete=Boolean|FAST +// When secure_delete is on, SQLite overwrites deleted content with zeros. +// https://www.sqlite.org/pragma.html#pragma_secure_delete // +// _synchronous=X | _sync=X +// Change the setting of the "synchronous" flag. +// https://www.sqlite.org/pragma.html#pragma_synchronous // +// _writable_schema=Boolean +// When this pragma is on, the SQLITE_MASTER tables in which database +// can be changed using ordinary UPDATE, INSERT, and DELETE statements. +// Warning: misuse of this pragma can easily result in a corrupt database file. 
func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) { if C.sqlite3_threadsafe() == 0 { return nil, errors.New("sqlite library was not compiled for thread-safe operation") @@ -1895,6 +1908,7 @@ func (s *SQLiteStmt) Close() error { if rv != C.SQLITE_OK { return s.c.lastError() } + s.c = nil runtime.SetFinalizer(s, nil) return nil } @@ -2000,6 +2014,7 @@ func (s *SQLiteStmt) query(ctx context.Context, args []driver.NamedValue) (drive closed: false, ctx: ctx, } + runtime.SetFinalizer(rows, (*SQLiteRows).Close) return rows, nil } @@ -2045,6 +2060,7 @@ func (s *SQLiteStmt) exec(ctx context.Context, args []driver.NamedValue) (driver err error } resultCh := make(chan result) + defer close(resultCh) go func() { r, err := s.execSync(args) resultCh <- result{r, err} @@ -2111,6 +2127,8 @@ func (rc *SQLiteRows) Close() error { return rc.s.c.lastError() } rc.s.mu.Unlock() + rc.s = nil + runtime.SetFinalizer(rc, nil) return nil } @@ -2157,6 +2175,7 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error { return rc.nextSyncLocked(dest) } resultCh := make(chan error) + defer close(resultCh) go func() { resultCh <- rc.nextSyncLocked(dest) }() diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go index afd93333d..bd9a3bc09 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_func_crypt.go @@ -50,15 +50,15 @@ import ( // perhaps using a cryptographic hash function like SHA1. // CryptEncoderSHA1 encodes a password with SHA1 -func CryptEncoderSHA1(pass []byte, hash interface{}) []byte { +func CryptEncoderSHA1(pass []byte, hash any) []byte { h := sha1.Sum(pass) return h[:] } // CryptEncoderSSHA1 encodes a password with SHA1 with the // configured salt. -func CryptEncoderSSHA1(salt string) func(pass []byte, hash interface{}) []byte { - return func(pass []byte, hash interface{}) []byte { +func CryptEncoderSSHA1(salt string) func(pass []byte, hash any) []byte { + return func(pass []byte, hash any) []byte { s := []byte(salt) p := append(pass, s...) h := sha1.Sum(p) @@ -67,15 +67,15 @@ func CryptEncoderSSHA1(salt string) func(pass []byte, hash interface{}) []byte { } // CryptEncoderSHA256 encodes a password with SHA256 -func CryptEncoderSHA256(pass []byte, hash interface{}) []byte { +func CryptEncoderSHA256(pass []byte, hash any) []byte { h := sha256.Sum256(pass) return h[:] } // CryptEncoderSSHA256 encodes a password with SHA256 // with the configured salt -func CryptEncoderSSHA256(salt string) func(pass []byte, hash interface{}) []byte { - return func(pass []byte, hash interface{}) []byte { +func CryptEncoderSSHA256(salt string) func(pass []byte, hash any) []byte { + return func(pass []byte, hash any) []byte { s := []byte(salt) p := append(pass, s...) h := sha256.Sum256(p) @@ -84,15 +84,15 @@ func CryptEncoderSSHA256(salt string) func(pass []byte, hash interface{}) []byte } // CryptEncoderSHA384 encodes a password with SHA384 -func CryptEncoderSHA384(pass []byte, hash interface{}) []byte { +func CryptEncoderSHA384(pass []byte, hash any) []byte { h := sha512.Sum384(pass) return h[:] } // CryptEncoderSSHA384 encodes a password with SHA384 // with the configured salt -func CryptEncoderSSHA384(salt string) func(pass []byte, hash interface{}) []byte { - return func(pass []byte, hash interface{}) []byte { +func CryptEncoderSSHA384(salt string) func(pass []byte, hash any) []byte { + return func(pass []byte, hash any) []byte { s := []byte(salt) p := append(pass, s...) 
h := sha512.Sum384(p) @@ -101,15 +101,15 @@ func CryptEncoderSSHA384(salt string) func(pass []byte, hash interface{}) []byte } // CryptEncoderSHA512 encodes a password with SHA512 -func CryptEncoderSHA512(pass []byte, hash interface{}) []byte { +func CryptEncoderSHA512(pass []byte, hash any) []byte { h := sha512.Sum512(pass) return h[:] } // CryptEncoderSSHA512 encodes a password with SHA512 // with the configured salt -func CryptEncoderSSHA512(salt string) func(pass []byte, hash interface{}) []byte { - return func(pass []byte, hash interface{}) []byte { +func CryptEncoderSSHA512(salt string) func(pass []byte, hash any) []byte { + return func(pass []byte, hash any) []byte { s := []byte(salt) p := append(pass, s...) h := sha512.Sum512(p) diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go index 514fd7ecb..34cad08e4 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go @@ -3,8 +3,8 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. -// +build cgo -// +build go1.8 +//go:build cgo && go1.8 +// +build cgo,go1.8 package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go index ac609c956..95cc7c0b6 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build libsqlite3 // +build libsqlite3 package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go index 9433fea82..03cbc8b68 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build !sqlite_omit_load_extension // +build !sqlite_omit_load_extension package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go index 8c75f9bda..d4f8ce651 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension_omit.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_omit_load_extension // +build sqlite_omit_load_extension package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go index 8c4d4d20d..51240cbf6 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_allow_uri_authority.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. 
+//go:build sqlite_allow_uri_authority // +build sqlite_allow_uri_authority package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go index 63c80cfea..565dbc298 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_app_armor.go @@ -4,8 +4,8 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. -// +build !windows -// +build sqlite_app_armor +//go:build !windows && sqlite_app_armor +// +build !windows,sqlite_app_armor package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go index c67fa82b1..63659b46b 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_column_metadata.go @@ -1,3 +1,4 @@ +//go:build sqlite_column_metadata // +build sqlite_column_metadata package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go index a676e097a..82c944e1b 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_foreign_keys.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_foreign_keys // +build sqlite_foreign_keys package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go index 0f38df75d..2645f284b 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_fts5 || fts5 // +build sqlite_fts5 fts5 package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go index f82bdd0d4..2d47827be 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_icu || icu // +build sqlite_icu icu package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go index 6512b2b3b..cd2e54011 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_introspect.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_introspect // +build sqlite_introspect package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_math_functions.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_math_functions.go index 7cd68d3f5..bd62d9a2a 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_math_functions.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_math_functions.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. 
+//go:build sqlite_math_functions // +build sqlite_math_functions package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go index cea032e3e..ed725eeb9 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build cgo // +build cgo package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go index b43e4821b..8cce278fd 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_hook.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_preupdate_hook // +build sqlite_preupdate_hook package sqlite3 @@ -54,10 +55,10 @@ func (d *SQLitePreUpdateData) Count() int { return int(C.sqlite3_preupdate_count(d.Conn.db)) } -func (d *SQLitePreUpdateData) row(dest []interface{}, new bool) error { +func (d *SQLitePreUpdateData) row(dest []any, new bool) error { for i := 0; i < d.Count() && i < len(dest); i++ { var val *C.sqlite3_value - var src interface{} + var src any // Initially I tried making this just a function pointer argument, but // it's absurdly complicated to pass C function pointers. @@ -95,7 +96,7 @@ func (d *SQLitePreUpdateData) row(dest []interface{}, new bool) error { // Old populates dest with the row data to be replaced. This works similar to // database/sql's Rows.Scan() -func (d *SQLitePreUpdateData) Old(dest ...interface{}) error { +func (d *SQLitePreUpdateData) Old(dest ...any) error { if d.Op == SQLITE_INSERT { return errors.New("There is no old row for INSERT operations") } @@ -104,7 +105,7 @@ func (d *SQLitePreUpdateData) Old(dest ...interface{}) error { // New populates dest with the replacement row data. This works similar to // database/sql's Rows.Scan() -func (d *SQLitePreUpdateData) New(dest ...interface{}) error { +func (d *SQLitePreUpdateData) New(dest ...any) error { if d.Op == SQLITE_DELETE { return errors.New("There is no new row for DELETE operations") } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go index c510a15b4..f60da6c16 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_preupdate_omit.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build !sqlite_preupdate_hook && cgo // +build !sqlite_preupdate_hook,cgo package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go index 934fa6b8e..6bb05b843 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. 
+//go:build sqlite_secure_delete // +build sqlite_secure_delete package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go index b0de130ff..982020aeb 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_secure_delete_fast.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_secure_delete_fast // +build sqlite_secure_delete_fast package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go index 2560c43a7..f1710c1c3 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize.go @@ -1,3 +1,4 @@ +//go:build !libsqlite3 || sqlite_serialize // +build !libsqlite3 sqlite_serialize package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize_omit.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize_omit.go index b154dd34d..d00ead0b6 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize_omit.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_serialize_omit.go @@ -1,3 +1,4 @@ +//go:build libsqlite3 && !sqlite_serialize // +build libsqlite3,!sqlite_serialize package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go index d4d30f0dd..799fbb0fc 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_stat4.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_stat4 // +build sqlite_stat4 package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go index adfa26c54..76f7bbfb6 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_unlock_notify.go @@ -3,8 +3,8 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. -// +build cgo -// +build sqlite_unlock_notify +//go:build cgo && sqlite_unlock_notify +// +build cgo,sqlite_unlock_notify package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go index b62b60840..de9630c27 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_userauth // +build sqlite_userauth package sqlite3 @@ -79,7 +80,7 @@ var ( // If a database contains the SQLITE_USER table, then the // call to Authenticate must be invoked with an // appropriate username and password prior to enable read and write -//access to the database. +// access to the database. // // Return SQLITE_OK on success or SQLITE_ERROR if the username/password // combination is incorrect or unknown. @@ -103,9 +104,10 @@ func (c *SQLiteConn) Authenticate(username, password string) error { // It is however exported for usage within SQL by the user. 
// // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authenticate(username, password string) int { // Allocate C Variables cuser := C.CString(username) @@ -155,9 +157,10 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error { // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserAdd(username, password string, admin int) int { // Allocate C Variables cuser := C.CString(username) @@ -207,9 +210,10 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserChange(username, password string, admin int) int { // Allocate C Variables cuser := C.CString(username) @@ -249,9 +253,10 @@ func (c *SQLiteConn) AuthUserDelete(username string) error { // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserDelete(username string) int { // Allocate C Variables cuser := C.CString(username) @@ -280,8 +285,9 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) { // It is however exported for usage within SQL by the user. // // Returns: -// 0 - Disabled -// 1 - Enabled +// +// 0 - Disabled +// 1 - Enabled func (c *SQLiteConn) authEnabled() int { return int(C._sqlite3_auth_enabled(c.db)) } diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go index 302cd57a9..15370df70 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth_omit.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build !sqlite_userauth // +build !sqlite_userauth package sqlite3 @@ -17,7 +18,7 @@ import ( // If a database contains the SQLITE_USER table, then the // call to Authenticate must be invoked with an // appropriate username and password prior to enable read and write -//access to the database. +// access to the database. // // Return SQLITE_OK on success or SQLITE_ERROR if the username/password // combination is incorrect or unknown. @@ -34,9 +35,10 @@ func (c *SQLiteConn) Authenticate(username, password string) error { // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authenticate(username, password string) int { // NOOP return 0 @@ -65,9 +67,10 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error { // It is however exported for usage within SQL by the user. 
// // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserAdd(username, password string, admin int) int { // NOOP return 0 @@ -96,9 +99,10 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserChange(username, password string, admin int) int { // NOOP return 0 @@ -122,9 +126,10 @@ func (c *SQLiteConn) AuthUserDelete(username string) error { // It is however exported for usage within SQL by the user. // // Returns: -// C.SQLITE_OK (0) -// C.SQLITE_ERROR (1) -// C.SQLITE_AUTH (23) +// +// C.SQLITE_OK (0) +// C.SQLITE_ERROR (1) +// C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserDelete(username string) int { // NOOP return 0 @@ -142,8 +147,9 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) { // It is however exported for usage within SQL by the user. // // Returns: -// 0 - Disabled -// 1 - Enabled +// +// 0 - Disabled +// 1 - Enabled func (c *SQLiteConn) authEnabled() int { // NOOP return 0 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go index 5185a96d0..df13c9d2d 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_full.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_vacuum_full // +build sqlite_vacuum_full package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go index a9d8a185c..a2e48814b 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vacuum_incr.go @@ -4,6 +4,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build sqlite_vacuum_incr // +build sqlite_vacuum_incr package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go index 4a93c4652..9b164b3e0 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_vtable.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. 
+//go:build sqlite_vtable || vtable // +build sqlite_vtable vtable package sqlite3 @@ -516,7 +517,7 @@ func goMDestroy(pClientData unsafe.Pointer) { func goVFilter(pCursor unsafe.Pointer, idxNum C.int, idxName *C.char, argc C.int, argv **C.sqlite3_value) *C.char { vtc := lookupHandle(pCursor).(*sqliteVTabCursor) args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc] - vals := make([]interface{}, 0, argc) + vals := make([]any, 0, argc) for _, v := range args { conv, err := callbackArgGeneric(v) if err != nil { @@ -588,7 +589,7 @@ func goVUpdate(pVTab unsafe.Pointer, argc C.int, argv **C.sqlite3_value, pRowid if v, ok := vt.vTab.(VTabUpdater); ok { // convert argv args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc] - vals := make([]interface{}, 0, argc) + vals := make([]any, 0, argc) for _, v := range args { conv, err := callbackArgGeneric(v) if err != nil { @@ -662,9 +663,9 @@ type VTab interface { // deleted. // See: https://sqlite.org/vtab.html#xupdate type VTabUpdater interface { - Delete(interface{}) error - Insert(interface{}, []interface{}) (int64, error) - Update(interface{}, []interface{}) error + Delete(any) error + Insert(any, []any) (int64, error) + Update(any, []any) error } // VTabCursor describes cursors that point into the virtual table and are used @@ -673,7 +674,7 @@ type VTabCursor interface { // http://sqlite.org/vtab.html#xclose Close() error // http://sqlite.org/vtab.html#xfilter - Filter(idxNum int, idxStr string, vals []interface{}) error + Filter(idxNum int, idxStr string, vals []any) error // http://sqlite.org/vtab.html#xnext Next() error // http://sqlite.org/vtab.html#xeof diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go index 077d3c644..1f9a75506 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build !windows // +build !windows package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go index 102f90c9a..fb4d32517 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_solaris.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build solaris // +build solaris package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go index 56bb91490..6c47cce19 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. 
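The `interface{}` → `any` substitutions in the virtual-table API above are cosmetic: `any` has been a built-in alias for `interface{}` since Go 1.18, so existing `VTabUpdater` and `VTabCursor` implementations keep compiling unchanged. A compile-time sketch of the equivalence (the `updater` interface is illustrative):

```go
package demo

// Identical types: any is an alias, not a distinct type.
var _ []interface{} = []any{}

// A type written against the old interface{} signatures still
// satisfies an interface declared with any.
type updater interface {
	Delete(any) error
	Insert(any, []any) (int64, error)
	Update(any, []any) error
}
```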
+//go:build sqlite_trace || trace // +build sqlite_trace trace package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go index 0fd8210bb..20537a097 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go @@ -74,7 +74,7 @@ func scanType(cdt string) reflect.Type { case SQLITE_TIME: return reflect.TypeOf(sql.NullTime{}) } - return reflect.TypeOf(new(interface{})) + return reflect.TypeOf(new(any)) } func databaseTypeConvSqlite(t string) int { diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go index b6739bf62..6527f6fd9 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_usleep_windows.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build cgo // +build cgo package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go index 81aa2abde..f863bcd36 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_windows.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build windows // +build windows package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/static_mock.go b/vendor/github.com/mattn/go-sqlite3/static_mock.go index f19e842ff..d2c5a2760 100644 --- a/vendor/github.com/mattn/go-sqlite3/static_mock.go +++ b/vendor/github.com/mattn/go-sqlite3/static_mock.go @@ -3,6 +3,7 @@ // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file. +//go:build !cgo // +build !cgo package sqlite3 @@ -28,10 +29,10 @@ type ( ) func (SQLiteDriver) Open(s string) (driver.Conn, error) { return nil, errorMsg } -func (c *SQLiteConn) RegisterAggregator(string, interface{}, bool) error { return errorMsg } +func (c *SQLiteConn) RegisterAggregator(string, any, bool) error { return errorMsg } func (c *SQLiteConn) RegisterAuthorizer(func(int, string, string, string) int) {} func (c *SQLiteConn) RegisterCollation(string, func(string, string) int) error { return errorMsg } func (c *SQLiteConn) RegisterCommitHook(func() int) {} -func (c *SQLiteConn) RegisterFunc(string, interface{}, bool) error { return errorMsg } +func (c *SQLiteConn) RegisterFunc(string, any, bool) error { return errorMsg } func (c *SQLiteConn) RegisterRollbackHook(func()) {} func (c *SQLiteConn) RegisterUpdateHook(func(int, string, string, int64)) {} diff --git a/vendor/k8s.io/kubernetes/LICENSE b/vendor/github.com/moby/sys/user/LICENSE similarity index 100% rename from vendor/k8s.io/kubernetes/LICENSE rename to vendor/github.com/moby/sys/user/LICENSE diff --git a/vendor/github.com/moby/sys/user/lookup_unix.go b/vendor/github.com/moby/sys/user/lookup_unix.go new file mode 100644 index 000000000..f95c1409f --- /dev/null +++ b/vendor/github.com/moby/sys/user/lookup_unix.go @@ -0,0 +1,157 @@ +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package user + +import ( + "io" + "os" + "strconv" + + "golang.org/x/sys/unix" +) + +// Unix-specific path to the passwd and group formatted files. 
+const ( + unixPasswdPath = "/etc/passwd" + unixGroupPath = "/etc/group" +) + +// LookupUser looks up a user by their username in /etc/passwd. If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (User, error) { + return lookupUserFunc(func(u User) bool { + return u.Name == username + }) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupId +// returns an error. +func LookupUid(uid int) (User, error) { + return lookupUserFunc(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupUserFunc(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, ErrNoPasswdEntries + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. +func LookupGroup(groupname string) (Group, error) { + return lookupGroupFunc(func(g Group) bool { + return g.Name == groupname + }) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. +func LookupGid(gid int) (Group, error) { + return lookupGroupFunc(func(g Group) bool { + return g.Gid == gid + }) +} + +func lookupGroupFunc(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the users. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No user entries found. + if len(groups) == 0 { + return Group{}, ErrNoGroupEntries + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +func GetPasswdPath() (string, error) { + return unixPasswdPath, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdPath) +} + +func GetGroupPath() (string, error) { + return unixGroupPath, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupPath) +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(unix.Getuid()) +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. 
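The new `github.com/moby/sys/user` vendor tree parses `/etc/passwd` and `/etc/group` directly, so lookups keep working in minimal images where cgo-backed NSS is unavailable. A hedged usage sketch of the helpers this file defines:

```go
package main

import (
	"fmt"

	"github.com/moby/sys/user"
)

func main() {
	// Name-based lookup in /etc/passwd.
	u, err := user.LookupUser("root")
	if err != nil {
		panic(err)
	}
	fmt.Printf("uid=%d gid=%d home=%s\n", u.Uid, u.Gid, u.Home)

	// The Current* helpers resolve the calling process's own ids.
	if g, err := user.CurrentGroup(); err == nil {
		fmt.Println("primary group:", g.Name)
	}
}
```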
+func CurrentGroup() (Group, error) { + return LookupGid(unix.Getgid()) +} + +func currentUserSubIDs(fileName string) ([]SubID, error) { + u, err := CurrentUser() + if err != nil { + return nil, err + } + filter := func(entry SubID) bool { + return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid) + } + return ParseSubIDFileFilter(fileName, filter) +} + +func CurrentUserSubUIDs() ([]SubID, error) { + return currentUserSubIDs("/etc/subuid") +} + +func CurrentUserSubGIDs() ([]SubID, error) { + return currentUserSubIDs("/etc/subgid") +} + +func CurrentProcessUIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/uid_map") +} + +func CurrentProcessGIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/gid_map") +} diff --git a/vendor/github.com/moby/sys/user/user.go b/vendor/github.com/moby/sys/user/user.go new file mode 100644 index 000000000..984466d1a --- /dev/null +++ b/vendor/github.com/moby/sys/user/user.go @@ -0,0 +1,605 @@ +package user + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + minID = 0 + maxID = 1<<31 - 1 // for 32-bit systems compatibility +) + +var ( + // ErrNoPasswdEntries is returned if no matching entries were found in /etc/group. + ErrNoPasswdEntries = errors.New("no matching entries in passwd file") + // ErrNoGroupEntries is returned if no matching entries were found in /etc/passwd. + ErrNoGroupEntries = errors.New("no matching entries in group file") + // ErrRange is returned if a UID or GID is outside of the valid range. + ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID) +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +// SubID represents an entry in /etc/sub{u,g}id +type SubID struct { + Name string + SubID int64 + Count int64 +} + +// IDMap represents an entry in /proc/PID/{u,g}id_map +type IDMap struct { + ID int64 + ParentID int64 + Count int64 +} + +func parseLine(line []byte, v ...interface{}) { + parseParts(bytes.Split(line, []byte(":")), v...) +} + +func parseParts(parts [][]byte, v ...interface{}) { + if len(parts) == 0 { + return + } + + for i, p := range parts { + // Ignore cases where we don't have enough fields to populate the arguments. + // Some configuration files like to misbehave. + if len(v) <= i { + break + } + + // Use the type of the argument to figure out how to parse it, scanf() style. + // This is legit. + switch e := v[i].(type) { + case *string: + *e = string(p) + case *int: + // "numbers", with conversion errors ignored because of some misbehaving configuration files. + *e, _ = strconv.Atoi(string(p)) + case *int64: + *e, _ = strconv.ParseInt(string(p), 10, 64) + case *[]string: + // Comma-separated lists. + if len(p) != 0 { + *e = strings.Split(string(p), ",") + } else { + *e = []string{} + } + default: + // Someone goof'd when writing code using this function. Scream so they can hear us. + panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! 
%#v is not a pointer!", e)) + } + } +} + +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswd(passwd) +} + +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, errors.New("nil source for passwd-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []User{} + ) + + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := User{} + parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + + defer group.Close() + return ParseGroup(group) +} + +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, errors.New("nil source for group-formatted data") + } + rd := bufio.NewReader(r) + out := []Group{} + + // Read the file line-by-line. + for { + var ( + isPrefix bool + wholeLine []byte + err error + ) + + // Read the next line. We do so in chunks (as much as reader's + // buffer is able to keep), check if we read enough columns + // already on each step and store final result in wholeLine. + for { + var line []byte + line, isPrefix, err = rd.ReadLine() + + if err != nil { + // We should return no error if EOF is reached + // without a match. + if err == io.EOF { + err = nil + } + return out, err + } + + // Simple common case: line is short enough to fit in a + // single reader's buffer. + if !isPrefix && len(wholeLine) == 0 { + wholeLine = line + break + } + + wholeLine = append(wholeLine, line...) + + // Check if we read the whole line already. + if !isPrefix { + break + } + } + + // There's no spec for /etc/passwd or /etc/group, but we try to follow + // the same rules as the glibc parser, which allows comments and blank + // space at the beginning of a line. + wholeLine = bytes.TrimSpace(wholeLine) + if len(wholeLine) == 0 || wholeLine[0] == '#' { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := Group{} + parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List) + + if filter == nil || filter(p) { + out = append(out, p) + } + } +} + +type ExecUser struct { + Uid int + Gid int + Sgids []int + Home string +} + +// GetExecUserPath is a wrapper for GetExecUser. 
It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. +func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + var passwd, group io.Reader + + if passwdFile, err := os.Open(passwdPath); err == nil { + passwd = passwdFile + defer passwdFile.Close() + } + + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// - "" +// - "user" +// - "uid" +// - "user:group" +// - "uid:gid +// - "user:gid" +// - "uid:group" +// +// It should be noted that if you specify a numeric user or group id, they will +// not be evaluated as usernames (only the metadata will be filled). So attempting +// to parse a user with user.Name = "1337" will produce the user with a UID of +// 1337. +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. + user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. + if user.Sgids == nil { + user.Sgids = []int{} + } + + // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax + var userArg, groupArg string + parseLine([]byte(userSpec), &userArg, &groupArg) + + // Convert userArg and groupArg to be numeric, so we don't have to execute + // Atoi *twice* for each iteration over lines. + uidArg, uidErr := strconv.Atoi(userArg) + gidArg, gidErr := strconv.Atoi(groupArg) + + // Find the matching user. + users, err := ParsePasswdFilter(passwd, func(u User) bool { + if userArg == "" { + // Default to current state of the user. + return u.Uid == user.Uid + } + + if uidErr == nil { + // If the userArg is numeric, always treat it as a UID. + return uidArg == u.Uid + } + + return u.Name == userArg + }) + + // If we can't find the user, we have to bail. + if err != nil && passwd != nil { + if userArg == "" { + userArg = strconv.Itoa(user.Uid) + } + return nil, fmt.Errorf("unable to find user %s: %w", userArg, err) + } + + var matchedUserName string + if len(users) > 0 { + // First match wins, even if there's more than one matching entry. + matchedUserName = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home + } else if userArg != "" { + // If we can't find a user with the given username, the only other valid + // option is if it's a numeric username with no associated entry in passwd. + + if uidErr != nil { + // Not numeric. + return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries) + } + user.Uid = uidArg + + // Must be inside valid uid range. + if user.Uid < minID || user.Uid > maxID { + return nil, ErrRange + } + + // Okay, so it's numeric. We can just roll with this. + } + + // On to the groups. 
If we matched a username, we need to do this because of + // the supplementary group IDs. + if groupArg != "" || matchedUserName != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // If the group argument isn't explicit, we'll just search for it. + if groupArg == "" { + // Check if user is a member of this group. + for _, u := range g.List { + if u == matchedUserName { + return true + } + } + return false + } + + if gidErr == nil { + // If the groupArg is numeric, always treat it as a GID. + return gidArg == g.Gid + } + + return g.Name == groupArg + }) + if err != nil && group != nil { + return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err) + } + + // Only start modifying user.Gid if it is in explicit form. + if groupArg != "" { + if len(groups) > 0 { + // First match wins, even if there's more than one matching entry. + user.Gid = groups[0].Gid + } else { + // If we can't find a group with the given name, the only other valid + // option is if it's a numeric group name with no associated entry in group. + + if gidErr != nil { + // Not numeric. + return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries) + } + user.Gid = gidArg + + // Must be inside valid gid range. + if user.Gid < minID || user.Gid > maxID { + return nil, ErrRange + } + + // Okay, so it's numeric. We can just roll with this. + } + } else if len(groups) > 0 { + // Supplementary group ids only make sense if in the implicit form. + user.Sgids = make([]int, len(groups)) + for i, group := range groups { + user.Sgids[i] = group.Gid + } + } + } + + return user, nil +} + +// GetAdditionalGroups looks up a list of groups by name or group id +// against the given /etc/group formatted data. If a group name cannot +// be found, an error will be returned. If a group id cannot be found, +// or the given group data is nil, the id will be returned as-is +// provided it is in the legal range. +func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + groups := []Group{} + if group != nil { + var err error + groups, err = ParseGroupFilter(group, func(g Group) bool { + for _, ag := range additionalGroups { + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + return true + } + } + return false + }) + if err != nil { + return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err) + } + } + + gidMap := make(map[int]struct{}) + for _, ag := range additionalGroups { + var found bool + for _, g := range groups { + // if we found a matched group either by name or gid, take the + // first matched as correct + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + if _, ok := gidMap[g.Gid]; !ok { + gidMap[g.Gid] = struct{}{} + found = true + break + } + } + } + // we asked for a group but didn't find it. let's check to see + // if we wanted a numeric group + if !found { + gid, err := strconv.ParseInt(ag, 10, 64) + if err != nil { + // Not a numeric ID either. + return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries) + } + // Ensure gid is inside gid range. + if gid < minID || gid > maxID { + return nil, ErrRange + } + gidMap[int(gid)] = struct{}{} + } + } + gids := []int{} + for gid := range gidMap { + gids = append(gids, gid) + } + return gids, nil +} + +// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups +// that opens the groupPath given and gives it as an argument to +// GetAdditionalGroups. 
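The resolution logic above is easiest to see end to end: `GetExecUser` accepts any `io.Reader` pair for passwd/group data, and `GetAdditionalGroups` takes group names or numeric ids. A sketch with in-memory data (the entries are illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/moby/sys/user"
)

func main() {
	passwd := strings.NewReader("root:x:0:0:root:/root:/bin/bash\nwww:x:33:33::/var/www:/bin/false\n")
	group := strings.NewReader("root:x:0:root\nwww:x:33:www\n")

	// "www:root": the user half resolves against passwd data, the group
	// half against group data; numeric specs are taken literally.
	eu, err := user.GetExecUser("www:root", nil, passwd, group)
	if err != nil {
		panic(err)
	}
	fmt.Printf("uid=%d gid=%d home=%s\n", eu.Uid, eu.Gid, eu.Home) // uid=33 gid=0 home=/var/www

	// Supplementary groups: names must exist in the data, while unknown
	// numeric ids pass through as-is (provided they are in range).
	gids, err := user.GetAdditionalGroups([]string{"www", "27"}, strings.NewReader("www:x:33:www\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(gids) // 33 and 27, in map-iteration order
}
```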
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + var group io.Reader + + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() + } + return GetAdditionalGroups(additionalGroups, group) +} + +func ParseSubIDFile(path string) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubID(subid) +} + +func ParseSubID(subid io.Reader) ([]SubID, error) { + return ParseSubIDFilter(subid, nil) +} + +func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubIDFilter(subid, filter) +} + +func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { + if r == nil { + return nil, errors.New("nil source for subid-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []SubID{} + ) + + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { + continue + } + + // see: man 5 subuid + p := SubID{} + parseLine(line, &p.Name, &p.SubID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +func ParseIDMapFile(path string) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMap(r) +} + +func ParseIDMap(r io.Reader) ([]IDMap, error) { + return ParseIDMapFilter(r, nil) +} + +func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMapFilter(r, filter) +} + +func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { + if r == nil { + return nil, errors.New("nil source for idmap-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []IDMap{} + ) + + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { + continue + } + + // see: man 7 user_namespaces + p := IDMap{} + parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} diff --git a/vendor/github.com/moby/sys/user/user_fuzzer.go b/vendor/github.com/moby/sys/user/user_fuzzer.go new file mode 100644 index 000000000..e018eae61 --- /dev/null +++ b/vendor/github.com/moby/sys/user/user_fuzzer.go @@ -0,0 +1,43 @@ +//go:build gofuzz +// +build gofuzz + +package user + +import ( + "io" + "strings" +) + +func IsDivisbleBy(n int, divisibleby int) bool { + return (n % divisibleby) == 0 +} + +func FuzzUser(data []byte) int { + if len(data) == 0 { + return -1 + } + if !IsDivisbleBy(len(data), 5) { + return -1 + } + + var divided [][]byte + + chunkSize := len(data) / 5 + + for i := 0; i < len(data); i += chunkSize { + end := i + chunkSize + + divided = append(divided, data[i:end]) + } + + _, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil) + + var passwd, group io.Reader + + group = strings.NewReader(string(divided[1])) + _, _ = GetAdditionalGroups([]string{string(divided[2])}, group) + + passwd = strings.NewReader(string(divided[3])) + _, _ = GetExecUser(string(divided[4]), nil, passwd, group) + return 1 +} diff --git a/vendor/github.com/opencontainers/go-digest/digestset/set.go 
b/vendor/github.com/opencontainers/go-digest/digestset/set.go deleted file mode 100644 index 71f24184c..000000000 --- a/vendor/github.com/opencontainers/go-digest/digestset/set.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2020, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digestset - -import ( - "errors" - "sort" - "strings" - "sync" - - digest "github.com/opencontainers/go-digest" -) - -var ( - // ErrDigestNotFound is used when a matching digest - // could not be found in a set. - ErrDigestNotFound = errors.New("digest not found") - - // ErrDigestAmbiguous is used when multiple digests - // are found in a set. None of the matching digests - // should be considered valid matches. - ErrDigestAmbiguous = errors.New("ambiguous digest string") -) - -// Set is used to hold a unique set of digests which -// may be easily referenced by easily referenced by a string -// representation of the digest as well as short representation. -// The uniqueness of the short representation is based on other -// digests in the set. If digests are omitted from this set, -// collisions in a larger set may not be detected, therefore it -// is important to always do short representation lookups on -// the complete set of digests. To mitigate collisions, an -// appropriately long short code should be used. -type Set struct { - mutex sync.RWMutex - entries digestEntries -} - -// NewSet creates an empty set of digests -// which may have digests added. -func NewSet() *Set { - return &Set{ - entries: digestEntries{}, - } -} - -// checkShortMatch checks whether two digests match as either whole -// values or short values. This function does not test equality, -// rather whether the second value could match against the first -// value. -func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value. 
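This deletion drops go-digest's `digestset` helper, the unambiguous-short-prefix resolver described in the `Lookup` doc comment above; after the podman v5 bump nothing in the dependency graph imports it, so vendoring prunes it. For reference, the removed API was used roughly like this (a sketch against the deleted code; the package still exists upstream):

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/go-digest/digestset" // no longer vendored here
)

func main() {
	set := digestset.NewSet()
	d := digest.Digest("sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b")
	if err := set.Add(d); err != nil {
		panic(err)
	}
	// A short prefix resolves as long as it is unambiguous in the set.
	got, err := set.Lookup("6c3c62")
	fmt.Println(got, err)
}
```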
-func (dst *Set) Lookup(d string) (digest.Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg digest.Algorithm - hex string - ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []digest.Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. 
-func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg digest.Algorithm - val string - digest digest.Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 892ba3de9..ce8313e79 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -21,12 +21,20 @@ const ( // MediaTypeLayoutHeader specifies the media type for the oci-layout. MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + // MediaTypeImageManifest specifies the media type for an image manifest. MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" - // MediaTypeImageIndex specifies the media type for an image index. - MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + // MediaTypeImageConfig specifies the media type for the image configuration. + MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value "{}". + MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" +) +const ( // MediaTypeImageLayer is the media type used for layers referenced by the manifest. MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" @@ -37,7 +45,15 @@ const ( // MediaTypeImageLayerZstd is the media type used for zstd compressed // layers referenced by the manifest. MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" +) +// Non-distributable layer media-types. +// +// Deprecated: Non-distributable layers are deprecated, and not recommended +// for future use. Implementations SHOULD NOT produce new non-distributable +// layers. +// https://github.com/opencontainers/image-spec/pull/965 +const ( // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. // @@ -66,10 +82,4 @@ const ( // layers. // https://github.com/opencontainers/image-spec/pull/965 MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" - - // MediaTypeImageConfig specifies the media type for the image configuration. 
- MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - - // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value `{}` - MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 11e09b584..7069ae44d 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc.5" + VersionDev = "" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 4e7717d53..d1236ba72 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -187,6 +187,10 @@ type Hook struct { type Hooks struct { // Prestart is Deprecated. Prestart is a list of hooks to be run before the container process is executed. // It is called in the Runtime Namespace + // + // Deprecated: use [Hooks.CreateRuntime], [Hooks.CreateContainer], and + // [Hooks.StartContainer] instead, which allow more granular hook control + // during the create and start phase. Prestart []Hook `json:"prestart,omitempty"` // CreateRuntime is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called // It is called in the Runtime Namespace @@ -371,6 +375,12 @@ type LinuxMemory struct { // Total memory limit (memory + swap). Swap *int64 `json:"swap,omitempty"` // Kernel memory limit (in bytes). + // + // Deprecated: kernel-memory limits are not supported in cgroups v2, and + // were obsoleted in [kernel v5.4]. This field should no longer be used, + // as it may be ignored by runtimes. + // + // [kernel v5.4]: https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0 Kernel *int64 `json:"kernel,omitempty"` // Kernel memory limit for tcp (in bytes) KernelTCP *int64 `json:"kernelTCP,omitempty"` diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index 35358c2c5..503971e05 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 1 + VersionMinor = 2 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "+dev" + VersionDev = "" ) // Version is the specification version that the package types support. 
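With image-spec clearing `VersionDev` (1.1.0 final) and runtime-spec moving from 1.1.0+dev to 1.2.0, the deprecation notes added above become visible in godoc: `Hooks.Prestart` should give way to the create/start phase hooks, and `LinuxMemory.Kernel` is ignored on cgroups v2. A sketch of the replacement hook wiring (hook paths are illustrative):

```go
package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	spec := specs.Spec{
		Version: specs.Version, // "1.2.0" after this bump
		Hooks: &specs.Hooks{
			// Instead of the deprecated Prestart, split the work across
			// the more granular create/start phases.
			CreateRuntime:  []specs.Hook{{Path: "/usr/local/bin/setup-netns"}},
			StartContainer: []specs.Hook{{Path: "/usr/local/bin/warm-cache"}},
		},
	}
	fmt.Println(spec.Version)
}
```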
diff --git a/vendor/github.com/openshift/imagebuilder/.travis.yml b/vendor/github.com/openshift/imagebuilder/.travis.yml index 9885aee24..124944ebd 100644 --- a/vendor/github.com/openshift/imagebuilder/.travis.yml +++ b/vendor/github.com/openshift/imagebuilder/.travis.yml @@ -6,7 +6,6 @@ services: - docker go: - - "1.19" - "1.20" before_install: diff --git a/vendor/github.com/openshift/imagebuilder/builder.go b/vendor/github.com/openshift/imagebuilder/builder.go index 74e118c3a..5776f4812 100644 --- a/vendor/github.com/openshift/imagebuilder/builder.go +++ b/vendor/github.com/openshift/imagebuilder/builder.go @@ -198,6 +198,21 @@ func (stages Stages) ByName(name string) (Stage, bool) { return stage, true } } + if i, err := strconv.Atoi(name); err == nil { + return stages.ByPosition(i) + } + return Stage{}, false +} + +func (stages Stages) ByPosition(position int) (Stage, bool) { + for _, stage := range stages { + // stage.Position is expected to be the same as the unnamed + // index variable for this loop, but comparing to the Position + // field's value is easier to explain + if stage.Position == position { + return stage, true + } + } return Stage{}, false } @@ -211,6 +226,16 @@ func (stages Stages) ByTarget(target string) (Stages, bool) { return stages[i : i+1], true } } + if position, err := strconv.Atoi(target); err == nil { + for i, stage := range stages { + // stage.Position is expected to be the same as the unnamed + // index variable for this loop, but comparing to the Position + // field's value is easier to explain + if stage.Position == position { + return stages[i : i+1], true + } + } + } return nil, false } @@ -224,6 +249,16 @@ func (stages Stages) ThroughTarget(target string) (Stages, bool) { return stages[0 : i+1], true } } + if position, err := strconv.Atoi(target); err == nil { + for i, stage := range stages { + // stage.Position is expected to be the same as the unnamed + // index variable for this loop, but comparing to the Position + // field's value is easier to explain + if stage.Position == position { + return stages[0 : i+1], true + } + } + } return nil, false } diff --git a/vendor/github.com/openshift/imagebuilder/dispatchers.go b/vendor/github.com/openshift/imagebuilder/dispatchers.go index fdb2aded6..f43adacb4 100644 --- a/vendor/github.com/openshift/imagebuilder/dispatchers.go +++ b/vendor/github.com/openshift/imagebuilder/dispatchers.go @@ -184,6 +184,9 @@ func add(b *Builder, args []string, attributes map[string]bool, flagArgs []strin switch { case strings.HasPrefix(arg, "--chown="): chown = strings.TrimPrefix(arg, "--chown=") + if chown == "" { + return fmt.Errorf("no value specified for --chown=") + } case strings.HasPrefix(arg, "--chmod="): chmod = strings.TrimPrefix(arg, "--chmod=") err = checkChmodConversion(chmod) @@ -192,6 +195,9 @@ func add(b *Builder, args []string, attributes map[string]bool, flagArgs []strin } case strings.HasPrefix(arg, "--checksum="): checksum = strings.TrimPrefix(arg, "--checksum=") + if checksum == "" { + return fmt.Errorf("no value specified for --checksum=") + } default: return fmt.Errorf("ADD only supports the --chmod=, --chown=, and --checksum= flags") } @@ -232,6 +238,9 @@ func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArg switch { case strings.HasPrefix(arg, "--chown="): chown = strings.TrimPrefix(arg, "--chown=") + if chown == "" { + return fmt.Errorf("no value specified for --chown=") + } case strings.HasPrefix(arg, "--chmod="): chmod = strings.TrimPrefix(arg, "--chmod=") err = 
checkChmodConversion(chmod) @@ -240,6 +249,9 @@ func dispatchCopy(b *Builder, args []string, attributes map[string]bool, flagArg } case strings.HasPrefix(arg, "--from="): from = strings.TrimPrefix(arg, "--from=") + if from == "" { + return fmt.Errorf("no value specified for --from=") + } default: return fmt.Errorf("COPY only supports the --chmod= --chown= and the --from= flags") } @@ -302,6 +314,9 @@ func from(b *Builder, args []string, attributes map[string]bool, flagArgs []stri switch { case strings.HasPrefix(arg, "--platform="): platformString := strings.TrimPrefix(arg, "--platform=") + if platformString == "" { + return fmt.Errorf("no value specified for --platform=") + } b.Platform = platformString default: return fmt.Errorf("FROM only supports the --platform flag") @@ -393,9 +408,15 @@ func run(b *Builder, args []string, attributes map[string]bool, flagArgs []strin switch { case strings.HasPrefix(arg, "--mount="): mount := strings.TrimPrefix(arg, "--mount=") + if mount == "" { + return fmt.Errorf("no value specified for --mount=") + } mounts = append(mounts, mount) case strings.HasPrefix(arg, "--network="): network = strings.TrimPrefix(arg, "--network=") + if network == "" { + return fmt.Errorf("no value specified for --network=") + } default: return fmt.Errorf("RUN only supports the --mount and --network flag") } diff --git a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec index 194707c54..5d4f2b701 100644 --- a/vendor/github.com/openshift/imagebuilder/imagebuilder.spec +++ b/vendor/github.com/openshift/imagebuilder/imagebuilder.spec @@ -12,7 +12,7 @@ # %global golang_version 1.8.1 -%{!?version: %global version 1.2.6-dev} +%{!?version: %global version 1.2.6} %{!?release: %global release 1} %global package_name imagebuilder %global product_name Container Image Builder diff --git a/vendor/github.com/rivo/uniseg/README.md b/vendor/github.com/rivo/uniseg/README.md index 25e934687..a8191b815 100644 --- a/vendor/github.com/rivo/uniseg/README.md +++ b/vendor/github.com/rivo/uniseg/README.md @@ -3,7 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/rivo/uniseg.svg)](https://pkg.go.dev/github.com/rivo/uniseg) [![Go Report](https://img.shields.io/badge/go%20report-A%2B-brightgreen.svg)](https://goreportcard.com/report/github.com/rivo/uniseg) -This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](https://unicode.org/reports/tr29/), Unicode Line Breaking according to [Unicode Standard Annex #14](https://unicode.org/reports/tr14/) (Unicode version 14.0.0), and monospace font string width calculation similar to [wcwidth](https://man7.org/linux/man-pages/man3/wcwidth.3.html). +This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](https://unicode.org/reports/tr29/), Unicode Line Breaking according to [Unicode Standard Annex #14](https://unicode.org/reports/tr14/) (Unicode version 15.0.0), and monospace font string width calculation similar to [wcwidth](https://man7.org/linux/man-pages/man3/wcwidth.3.html). 
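The imagebuilder hunks above carry the behavioral changes in this stretch: `Stages.ByName`, `ByTarget`, and `ThroughTarget` now fall back to numeric stage positions, and empty flag values (`--chown=`, `--checksum=`, `--from=`, `--platform=`, `--mount=`, `--network=`) are rejected instead of silently accepted. A sketch of the position fallback (Dockerfile contents illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/openshift/imagebuilder"
)

func main() {
	df := "FROM golang AS build\nRUN go build -o /app ./...\nFROM scratch\nCOPY --from=0 /app /app\n"
	node, err := imagebuilder.ParseDockerfile(strings.NewReader(df))
	if err != nil {
		panic(err)
	}
	stages, err := imagebuilder.NewStages(node, imagebuilder.NewBuilder(nil))
	if err != nil {
		panic(err)
	}
	// "1" names no stage, but it now resolves by position.
	if s, ok := stages.ByName("1"); ok {
		fmt.Println("resolved stage at position", s.Position)
	}
}
```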
## Background @@ -73,7 +73,7 @@ for gr.Next() { ### Using the [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step) or [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString) Function -This is orders of magnitude faster than the `Graphemes` class, but it requires the handling of states and boundaries: +This avoids allocating a new `Graphemes` object but it requires the handling of states and boundaries: ```go str := "🇩🇪🏳️‍🌈" @@ -88,29 +88,7 @@ for len(str) > 0 { ### Advanced Examples -Breaking into grapheme clusters and evaluating line breaks: - -```go -str := "First line.\nSecond line." -state := -1 -var ( - c string - boundaries int -) -for len(str) > 0 { - c, str, boundaries, state = uniseg.StepString(str, state) - fmt.Print(c) - if boundaries&uniseg.MaskLine == uniseg.LineCanBreak { - fmt.Print("|") - } else if boundaries&uniseg.MaskLine == uniseg.LineMustBreak { - fmt.Print("‖") - } -} -// First |line. -// ‖Second |line.‖ -``` - -If you're only interested in word segmentation, use [`FirstWord`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWord) or [`FirstWordInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWordInString): +The [`Graphemes`](https://pkg.go.dev/github.com/rivo/uniseg#Graphemes) class offers the most convenient way to access all functionality of this package. But in some cases, it may be better to use the specialized functions directly. For example, if you're only interested in word segmentation, use [`FirstWord`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWord) or [`FirstWordInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWordInString): ```go str := "Hello, world!" @@ -133,6 +111,8 @@ Similarly, use - [`FirstSentence`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentence) or [`FirstSentenceInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentenceInString) for sentence segmentation only, and - [`FirstLineSegment`](https://pkg.go.dev/github.com/rivo/uniseg#FirstLineSegment) or [`FirstLineSegmentInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstLineSegmentInString) for line breaking / word wrapping (although using [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step) or [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString) is preferred as it will observe grapheme cluster boundaries). +If you're only interested in the width of characters, use [`FirstGraphemeCluster`](https://pkg.go.dev/github.com/rivo/uniseg#FirstGraphemeCluster) or [`FirstGraphemeClusterInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstGraphemeClusterInString). It is much faster than using [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step), [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString), or the [`Graphemes`](https://pkg.go.dev/github.com/rivo/uniseg#Graphemes) class because it does not include the logic for word / sentence / line boundaries. + Finally, if you need to reverse a string while preserving grapheme clusters, use [`ReverseString`](https://pkg.go.dev/github.com/rivo/uniseg#ReverseString): ```go diff --git a/vendor/github.com/rivo/uniseg/eastasianwidth.go b/vendor/github.com/rivo/uniseg/eastasianwidth.go index 661934ac2..5fc54d991 100644 --- a/vendor/github.com/rivo/uniseg/eastasianwidth.go +++ b/vendor/github.com/rivo/uniseg/eastasianwidth.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. 
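On the uniseg side, the README rewrite steers width-only callers toward `FirstGraphemeCluster`/`FirstGraphemeClusterInString`, which skip the word/sentence/line state machines that `Step` drags along. A sketch of that faster path:

```go
package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	str := "🇩🇪🏳️‍🌈!"
	state := -1
	for len(str) > 0 {
		var cluster string
		var width int
		cluster, str, width, state = uniseg.FirstGraphemeClusterInString(str, state)
		fmt.Printf("%q width=%d\n", cluster, width)
	}
}
```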
+package uniseg + // eastAsianWidth are taken from -// https://www.unicode.org/Public/14.0.0/ucd/EastAsianWidth.txt +// https://www.unicode.org/Public/15.0.0/ucd/EastAsianWidth.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var eastAsianWidth = [][3]int{ {0x0000, 0x001F, prN}, // Cc [32] .. @@ -504,6 +504,7 @@ var eastAsianWidth = [][3]int{ {0x0CE2, 0x0CE3, prN}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prN}, // Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prN}, // Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prN}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prN}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prN}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prN}, // Lo [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -565,7 +566,7 @@ var eastAsianWidth = [][3]int{ {0x0EBD, 0x0EBD, prN}, // Lo LAO SEMIVOWEL SIGN NYO {0x0EC0, 0x0EC4, prN}, // Lo [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI {0x0EC6, 0x0EC6, prN}, // Lm LAO KO LA - {0x0EC8, 0x0ECD, prN}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prN}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prN}, // Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0EDC, 0x0EDF, prN}, // Lo [4] LAO HO NO..LAO LETTER KHMU NYO {0x0F00, 0x0F00, prN}, // Lo TIBETAN SYLLABLE OM @@ -1916,6 +1917,7 @@ var eastAsianWidth = [][3]int{ {0x10EAB, 0x10EAC, prN}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EAD, 0x10EAD, prN}, // Pd YEZIDI HYPHENATION MARK {0x10EB0, 0x10EB1, prN}, // Lo [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prN}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prN}, // Lo [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F1D, 0x10F26, prN}, // No [10] OLD SOGDIAN NUMBER ONE..OLD SOGDIAN FRACTION ONE HALF {0x10F27, 0x10F27, prN}, // Lo OLD SOGDIAN LIGATURE AYIN-DALETH @@ -1998,6 +2000,8 @@ var eastAsianWidth = [][3]int{ {0x11236, 0x11237, prN}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA {0x11238, 0x1123D, prN}, // Po [6] KHOJKI DANDA..KHOJKI ABBREVIATION SIGN {0x1123E, 0x1123E, prN}, // Mn KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prN}, // Lo [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prN}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prN}, // Lo [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prN}, // Lo MULTANI LETTER GHA {0x1128A, 0x1128D, prN}, // Lo [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -2160,6 +2164,7 @@ var eastAsianWidth = [][3]int{ {0x11A9E, 0x11AA2, prN}, // Po [5] SOYOMBO HEAD MARK WITH MOON AND SUN AND TRIPLE FLAME..SOYOMBO TERMINAL MARK-2 {0x11AB0, 0x11ABF, prN}, // Lo [16] CANADIAN SYLLABICS NATTILIK HI..CANADIAN SYLLABICS SPA {0x11AC0, 0x11AF8, prN}, // Lo [57] PAU CIN HAU LETTER PA..PAU CIN HAU GLOTTAL STOP FINAL + {0x11B00, 0x11B09, prN}, // Po [10] DEVANAGARI HEAD MARK..DEVANAGARI SIGN MINDU {0x11C00, 0x11C08, prN}, // Lo [9] BHAIKSUKI LETTER A..BHAIKSUKI LETTER VOCALIC L {0x11C0A, 0x11C2E, 
prN}, // Lo [37] BHAIKSUKI LETTER E..BHAIKSUKI LETTER HA {0x11C2F, 0x11C2F, prN}, // Mc BHAIKSUKI VOWEL SIGN AA @@ -2205,6 +2210,19 @@ var eastAsianWidth = [][3]int{ {0x11EF3, 0x11EF4, prN}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prN}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O {0x11EF7, 0x11EF8, prN}, // Po [2] MAKASAR PASSIMBANG..MAKASAR END OF SECTION + {0x11F00, 0x11F01, prN}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prN}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prN}, // Mc KAWI SIGN VISARGA + {0x11F04, 0x11F10, prN}, // Lo [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prN}, // Lo [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prN}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prN}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prN}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prN}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prN}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prN}, // Mn KAWI CONJOINER + {0x11F43, 0x11F4F, prN}, // Po [13] KAWI DANDA..KAWI PUNCTUATION CLOSING SPIRAL + {0x11F50, 0x11F59, prN}, // Nd [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prN}, // Lo LISU LETTER YHA {0x11FC0, 0x11FD4, prN}, // No [21] TAMIL FRACTION ONE THREE-HUNDRED-AND-TWENTIETH..TAMIL FRACTION DOWNSCALING FACTOR KIIZH {0x11FD5, 0x11FDC, prN}, // So [8] TAMIL SIGN NEL..TAMIL SIGN MUKKURUNI @@ -2217,8 +2235,11 @@ var eastAsianWidth = [][3]int{ {0x12480, 0x12543, prN}, // Lo [196] CUNEIFORM SIGN AB TIMES NUN TENU..CUNEIFORM SIGN ZU5 TIMES THREE DISH TENU {0x12F90, 0x12FF0, prN}, // Lo [97] CYPRO-MINOAN SIGN CM001..CYPRO-MINOAN SIGN CM114 {0x12FF1, 0x12FF2, prN}, // Po [2] CYPRO-MINOAN SIGN CM301..CYPRO-MINOAN SIGN CM302 - {0x13000, 0x1342E, prN}, // Lo [1071] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH AA032 - {0x13430, 0x13438, prN}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x13000, 0x1342F, prN}, // Lo [1072] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH V011D + {0x13430, 0x1343F, prN}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prN}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prN}, // Lo [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE LOST SIGN + {0x13447, 0x13455, prN}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x14646, prN}, // Lo [583] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A530 {0x16800, 0x16A38, prN}, // Lo [569] BAMUM LETTER PHASE-A NGKUE MFON..BAMUM LETTER PHASE-F VUEQ {0x16A40, 0x16A5E, prN}, // Lo [31] MRO LETTER TA..MRO LETTER TEK @@ -2263,7 +2284,9 @@ var eastAsianWidth = [][3]int{ {0x1AFFD, 0x1AFFE, prW}, // Lm [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B0FF, prW}, // Lo [256] KATAKANA LETTER ARCHAIC E..HENTAIGANA LETTER RE-2 {0x1B100, 0x1B122, prW}, // Lo [35] HENTAIGANA LETTER RE-3..KATAKANA LETTER ARCHAIC WU + {0x1B132, 0x1B132, prW}, // Lo HIRAGANA LETTER SMALL KO {0x1B150, 0x1B152, prW}, // Lo [3] HIRAGANA LETTER SMALL WI..HIRAGANA LETTER SMALL WO + {0x1B155, 0x1B155, prW}, // Lo KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prW}, // Lo [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1B170, 0x1B2FB, prW}, // Lo [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB {0x1BC00, 0x1BC6A, prN}, // Lo [107] 
DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M @@ -2294,6 +2317,7 @@ var eastAsianWidth = [][3]int{ {0x1D200, 0x1D241, prN}, // So [66] GREEK VOCAL NOTATION SYMBOL-1..GREEK INSTRUMENTAL NOTATION SYMBOL-54 {0x1D242, 0x1D244, prN}, // Mn [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME {0x1D245, 0x1D245, prN}, // So GREEK MUSICAL LEIMMA + {0x1D2C0, 0x1D2D3, prN}, // No [20] KAKTOVIK NUMERAL ZERO..KAKTOVIK NUMERAL NINETEEN {0x1D2E0, 0x1D2F3, prN}, // No [20] MAYAN NUMERAL ZERO..MAYAN NUMERAL NINETEEN {0x1D300, 0x1D356, prN}, // So [87] MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING {0x1D360, 0x1D378, prN}, // No [25] COUNTING ROD UNIT DIGIT ONE..TALLY MARK FIVE @@ -2353,11 +2377,14 @@ var eastAsianWidth = [][3]int{ {0x1DF00, 0x1DF09, prN}, // Ll [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prN}, // Lo LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prN}, // Ll [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prN}, // Ll [6] LATIN SMALL LETTER D WITH MID-HEIGHT LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prN}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prN}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prN}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prN}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prN}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prN}, // Lm [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prN}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prN}, // Lo [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prN}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prN}, // Lm [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -2370,6 +2397,10 @@ var eastAsianWidth = [][3]int{ {0x1E2EC, 0x1E2EF, prN}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prN}, // Nd [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE {0x1E2FF, 0x1E2FF, prN}, // Sc WANCHO NGUN SIGN + {0x1E4D0, 0x1E4EA, prN}, // Lo [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prN}, // Lm NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prN}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prN}, // Nd [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE {0x1E7E0, 0x1E7E6, prN}, // Lo [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prN}, // Lo [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prN}, // Lo [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -2498,13 +2529,14 @@ var eastAsianWidth = [][3]int{ {0x1F6D0, 0x1F6D2, prW}, // So [3] PLACE OF WORSHIP..SHOPPING TROLLEY {0x1F6D3, 0x1F6D4, prN}, // So [2] STUPA..PAGODA {0x1F6D5, 0x1F6D7, prW}, // So [3] HINDU TEMPLE..ELEVATOR - {0x1F6DD, 0x1F6DF, prW}, // So [3] PLAYGROUND SLIDE..RING BUOY + {0x1F6DC, 0x1F6DF, prW}, // So [4] WIRELESS..RING BUOY {0x1F6E0, 0x1F6EA, prN}, // So [11] HAMMER AND WRENCH..NORTHEAST-POINTING AIRPLANE {0x1F6EB, 0x1F6EC, prW}, // So [2] AIRPLANE DEPARTURE..AIRPLANE 
ARRIVING {0x1F6F0, 0x1F6F3, prN}, // So [4] SATELLITE..PASSENGER SHIP {0x1F6F4, 0x1F6FC, prW}, // So [9] SCOOTER..ROLLER SKATE - {0x1F700, 0x1F773, prN}, // So [116] ALCHEMICAL SYMBOL FOR QUINTESSENCE..ALCHEMICAL SYMBOL FOR HALF OUNCE - {0x1F780, 0x1F7D8, prN}, // So [89] BLACK LEFT-POINTING ISOSCELES RIGHT TRIANGLE..NEGATIVE CIRCLED SQUARE + {0x1F700, 0x1F776, prN}, // So [119] ALCHEMICAL SYMBOL FOR QUINTESSENCE..LUNAR ECLIPSE + {0x1F77B, 0x1F77F, prN}, // So [5] HAUMEA..ORCUS + {0x1F780, 0x1F7D9, prN}, // So [90] BLACK LEFT-POINTING ISOSCELES RIGHT TRIANGLE..NINE POINTED WHITE STAR {0x1F7E0, 0x1F7EB, prW}, // So [12] LARGE ORANGE CIRCLE..LARGE BROWN SQUARE {0x1F7F0, 0x1F7F0, prW}, // So HEAVY EQUALS SIGN {0x1F800, 0x1F80B, prN}, // So [12] LEFTWARDS ARROW WITH SMALL TRIANGLE ARROWHEAD..DOWNWARDS ARROW WITH LARGE TRIANGLE ARROWHEAD @@ -2521,22 +2553,20 @@ var eastAsianWidth = [][3]int{ {0x1F947, 0x1F9FF, prW}, // So [185] FIRST PLACE MEDAL..NAZAR AMULET {0x1FA00, 0x1FA53, prN}, // So [84] NEUTRAL CHESS KING..BLACK CHESS KNIGHT-BISHOP {0x1FA60, 0x1FA6D, prN}, // So [14] XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER - {0x1FA70, 0x1FA74, prW}, // So [5] BALLET SHOES..THONG SANDAL - {0x1FA78, 0x1FA7C, prW}, // So [5] DROP OF BLOOD..CRUTCH - {0x1FA80, 0x1FA86, prW}, // So [7] YO-YO..NESTING DOLLS - {0x1FA90, 0x1FAAC, prW}, // So [29] RINGED PLANET..HAMSA - {0x1FAB0, 0x1FABA, prW}, // So [11] FLY..NEST WITH EGGS - {0x1FAC0, 0x1FAC5, prW}, // So [6] ANATOMICAL HEART..PERSON WITH CROWN - {0x1FAD0, 0x1FAD9, prW}, // So [10] BLUEBERRIES..JAR - {0x1FAE0, 0x1FAE7, prW}, // So [8] MELTING FACE..BUBBLES - {0x1FAF0, 0x1FAF6, prW}, // So [7] HAND WITH INDEX FINGER AND THUMB CROSSED..HEART HANDS + {0x1FA70, 0x1FA7C, prW}, // So [13] BALLET SHOES..CRUTCH + {0x1FA80, 0x1FA88, prW}, // So [9] YO-YO..FLUTE + {0x1FA90, 0x1FABD, prW}, // So [46] RINGED PLANET..WING + {0x1FABF, 0x1FAC5, prW}, // So [7] GOOSE..PERSON WITH CROWN + {0x1FACE, 0x1FADB, prW}, // So [14] MOOSE..PEA POD + {0x1FAE0, 0x1FAE8, prW}, // So [9] MELTING FACE..SHAKING FACE + {0x1FAF0, 0x1FAF8, prW}, // So [9] HAND WITH INDEX FINGER AND THUMB CROSSED..RIGHTWARDS PUSHING HAND {0x1FB00, 0x1FB92, prN}, // So [147] BLOCK SEXTANT-1..UPPER HALF INVERSE MEDIUM SHADE AND LOWER HALF BLOCK {0x1FB94, 0x1FBCA, prN}, // So [55] LEFT HALF INVERSE MEDIUM SHADE AND RIGHT HALF BLOCK..WHITE UP-POINTING CHEVRON {0x1FBF0, 0x1FBF9, prN}, // Nd [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x20000, 0x2A6DF, prW}, // Lo [42720] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6DF {0x2A6E0, 0x2A6FF, prW}, // Cn [32] .. - {0x2A700, 0x2B738, prW}, // Lo [4153] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B738 - {0x2B739, 0x2B73F, prW}, // Cn [7] .. + {0x2A700, 0x2B739, prW}, // Lo [4154] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B739 + {0x2B73A, 0x2B73F, prW}, // Cn [6] .. {0x2B740, 0x2B81D, prW}, // Lo [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D {0x2B81E, 0x2B81F, prW}, // Cn [2] .. {0x2B820, 0x2CEA1, prW}, // Lo [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 @@ -2547,7 +2577,9 @@ var eastAsianWidth = [][3]int{ {0x2FA1E, 0x2FA1F, prW}, // Cn [2] .. {0x2FA20, 0x2FFFD, prW}, // Cn [1502] .. {0x30000, 0x3134A, prW}, // Lo [4939] CJK UNIFIED IDEOGRAPH-30000..CJK UNIFIED IDEOGRAPH-3134A - {0x3134B, 0x3FFFD, prW}, // Cn [60595] .. + {0x3134B, 0x3134F, prW}, // Cn [5] .. + {0x31350, 0x323AF, prW}, // Lo [4192] CJK UNIFIED IDEOGRAPH-31350..CJK UNIFIED IDEOGRAPH-323AF + {0x323B0, 0x3FFFD, prW}, // Cn [56398] .. 
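For context on how a generated range table like eastAsianWidth is consumed: the rows are sorted and non-overlapping, so a rune's East Asian Width class can be found by binary search and then mapped to a cell width. A minimal sketch under those assumptions; prN/prW/prA and the three toy rows below are illustrative stand-ins, not the real generated constants:

    package main

    import "fmt"

    // Illustrative stand-ins for the generated property constants.
    const (
        prN = iota // narrow: 1 cell
        prW        // wide: 2 cells
        prA        // ambiguous: 1 cell unless configured otherwise
    )

    // A toy table in the same {first, last, property} shape as eastAsianWidth.
    var toyRanges = [][3]int{
        {0x0020, 0x007E, prN},   // printable ASCII
        {0x4E00, 0x9FFF, prW},   // CJK Unified Ideographs
        {0xE0100, 0xE01EF, prA}, // variation selectors 17..256
    }

    // lookup binary-searches the sorted, non-overlapping ranges for r.
    func lookup(ranges [][3]int, r rune) int {
        lo, hi := 0, len(ranges)-1
        for lo <= hi {
            mid := lo + (hi-lo)/2 // overflow-safe midpoint
            switch {
            case int(r) < ranges[mid][0]:
                hi = mid - 1
            case int(r) > ranges[mid][1]:
                lo = mid + 1
            default:
                return ranges[mid][2]
            }
        }
        return prN // unlisted code points fall back to narrow
    }

    func main() {
        fmt.Println(lookup(toyRanges, '中') == prW) // true: 2 cells
    }

The overflow-safe midpoint echoes the len(properties) >= 1<<31 guard added to gen_properties.go further down in this patch.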
{0xE0001, 0xE0001, prN}, // Cf LANGUAGE TAG {0xE0020, 0xE007F, prN}, // Cf [96] TAG SPACE..CANCEL TAG {0xE0100, 0xE01EF, prA}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 diff --git a/vendor/github.com/rivo/uniseg/emojipresentation.go b/vendor/github.com/rivo/uniseg/emojipresentation.go index fd0f7451a..9b5f499c4 100644 --- a/vendor/github.com/rivo/uniseg/emojipresentation.go +++ b/vendor/github.com/rivo/uniseg/emojipresentation.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // emojiPresentation are taken from // // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var emojiPresentation = [][3]int{ {0x231A, 0x231B, prEmojiPresentation}, // E0.6 [2] (⌚..⌛) watch..hourglass done @@ -211,6 +211,7 @@ var emojiPresentation = [][3]int{ {0x1F6D1, 0x1F6D2, prEmojiPresentation}, // E3.0 [2] (🛑..🛒) stop sign..shopping cart {0x1F6D5, 0x1F6D5, prEmojiPresentation}, // E12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6D7, prEmojiPresentation}, // E13.0 [2] (🛖..🛗) hut..elevator + {0x1F6DC, 0x1F6DC, prEmojiPresentation}, // E15.0 [1] (🛜) wireless {0x1F6DD, 0x1F6DF, prEmojiPresentation}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy {0x1F6EB, 0x1F6EC, prEmojiPresentation}, // E1.0 [2] (🛫..🛬) airplane departure..airplane arrival {0x1F6F4, 0x1F6F6, prEmojiPresentation}, // E3.0 [3] (🛴..🛶) kick scooter..canoe @@ -267,19 +268,28 @@ var emojiPresentation = [][3]int{ {0x1F9E7, 0x1F9FF, prEmojiPresentation}, // E11.0 [25] (🧧..🧿) red envelope..nazar amulet {0x1FA70, 0x1FA73, prEmojiPresentation}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA74, prEmojiPresentation}, // E13.0 [1] (🩴) thong sandal + {0x1FA75, 0x1FA77, prEmojiPresentation}, // E15.0 [3] (🩵..🩷) light blue heart..pink heart {0x1FA78, 0x1FA7A, prEmojiPresentation}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7C, prEmojiPresentation}, // E14.0 [2] (🩻..🩼) x-ray..crutch {0x1FA80, 0x1FA82, prEmojiPresentation}, // E12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA86, prEmojiPresentation}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls + {0x1FA87, 0x1FA88, prEmojiPresentation}, // E15.0 [2] (🪇..🪈) maracas..flute {0x1FA90, 0x1FA95, prEmojiPresentation}, // E12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FAA8, prEmojiPresentation}, // E13.0 [19] (🪖..🪨) military helmet..rock {0x1FAA9, 0x1FAAC, prEmojiPresentation}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa + {0x1FAAD, 0x1FAAF, prEmojiPresentation}, // E15.0 [3] (🪭..🪯) folding hand fan..khanda {0x1FAB0, 0x1FAB6, prEmojiPresentation}, // E13.0 [7] (🪰..🪶) fly..feather {0x1FAB7, 0x1FABA, prEmojiPresentation}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs + {0x1FABB, 0x1FABD, prEmojiPresentation}, // E15.0 [3] (🪻..🪽) hyacinth..wing + {0x1FABF, 0x1FABF, prEmojiPresentation}, // E15.0 [1] (🪿) goose {0x1FAC0, 0x1FAC2, prEmojiPresentation}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging {0x1FAC3, 0x1FAC5, prEmojiPresentation}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown + {0x1FACE, 0x1FACF, prEmojiPresentation}, // E15.0 [2] (🫎..🫏) moose..donkey {0x1FAD0, 0x1FAD6, prEmojiPresentation}, // E13.0 [7] (🫐..🫖) blueberries..teapot {0x1FAD7, 0x1FAD9, prEmojiPresentation}, // E14.0 [3] (🫗..🫙) pouring 
liquid..jar + {0x1FADA, 0x1FADB, prEmojiPresentation}, // E15.0 [2] (🫚..🫛) ginger root..pea pod {0x1FAE0, 0x1FAE7, prEmojiPresentation}, // E14.0 [8] (🫠..🫧) melting face..bubbles + {0x1FAE8, 0x1FAE8, prEmojiPresentation}, // E15.0 [1] (🫨) shaking face {0x1FAF0, 0x1FAF6, prEmojiPresentation}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands + {0x1FAF7, 0x1FAF8, prEmojiPresentation}, // E15.0 [2] (🫷..🫸) leftwards pushing hand..rightwards pushing hand } diff --git a/vendor/github.com/rivo/uniseg/gen_breaktest.go b/vendor/github.com/rivo/uniseg/gen_breaktest.go index e613c4cd0..6bfbeb5e7 100644 --- a/vendor/github.com/rivo/uniseg/gen_breaktest.go +++ b/vendor/github.com/rivo/uniseg/gen_breaktest.go @@ -32,7 +32,7 @@ import ( // We want to test against a specific version rather than the latest. When the // package is upgraded to a new version, change these to generate new tests. const ( - testCaseURL = `https://www.unicode.org/Public/14.0.0/ucd/auxiliary/%s.txt` + testCaseURL = `https://www.unicode.org/Public/15.0.0/ucd/auxiliary/%s.txt` ) func main() { @@ -76,9 +76,9 @@ func parse(url string) ([]byte, error) { buf := new(bytes.Buffer) buf.Grow(120 << 10) - buf.WriteString(`package uniseg + buf.WriteString(`// Code generated via go generate from gen_breaktest.go. DO NOT EDIT. -// Code generated via go generate from gen_breaktest.go. DO NOT EDIT. +package uniseg // ` + os.Args[3] + ` are Grapheme testcases taken from // ` + url + ` @@ -136,7 +136,9 @@ var ( // // E.g. for the input b="÷ 0020 × 0308 ÷ 1F1E6 ÷" // it will append -// "\u0020\u0308\U0001F1E6" +// +// "\u0020\u0308\U0001F1E6" +// // and "[][]rune{{0x0020,0x0308},{0x1F1E6},}" // to orig and exp respectively. // diff --git a/vendor/github.com/rivo/uniseg/gen_properties.go b/vendor/github.com/rivo/uniseg/gen_properties.go index 999d5efdd..8992d2c5f 100644 --- a/vendor/github.com/rivo/uniseg/gen_properties.go +++ b/vendor/github.com/rivo/uniseg/gen_properties.go @@ -41,8 +41,8 @@ import ( // We want to test against a specific version rather than the latest. When the // package is upgraded to a new version, change these to generate new tests. const ( - propertyURL = `https://www.unicode.org/Public/14.0.0/ucd/%s.txt` - emojiURL = `https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt` + propertyURL = `https://www.unicode.org/Public/15.0.0/ucd/%s.txt` + emojiURL = `https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt` ) // The regular expression for a line containing a code point range property. @@ -178,6 +178,11 @@ func parse(propertyURL, emojiProperty string, includeGeneralCategory bool) (stri } } + // Avoid overflow during binary search. + if len(properties) >= 1<<31 { + return "", errors.New("too many properties") + } + // Sort properties. sort.Slice(properties, func(i, j int) bool { left, _ := strconv.ParseUint(properties[i][0], 16, 64) @@ -200,9 +205,9 @@ func parse(propertyURL, emojiProperty string, includeGeneralCategory bool) (stri // ` + emojiURL + ` // ("Extended_Pictographic" only)` } - buf.WriteString(`package uniseg + buf.WriteString(`// Code generated via go generate from gen_properties.go. DO NOT EDIT. -// Code generated via go generate from gen_properties.go. DO NOT EDIT. 
+package uniseg // ` + os.Args[3] + ` are taken from // ` + propertyURL + emojiComment + ` diff --git a/vendor/github.com/rivo/uniseg/grapheme.go b/vendor/github.com/rivo/uniseg/grapheme.go index 0086fc1b2..b12403d43 100644 --- a/vendor/github.com/rivo/uniseg/grapheme.go +++ b/vendor/github.com/rivo/uniseg/grapheme.go @@ -13,9 +13,10 @@ import "unicode/utf8" // well as boundary information and character width is available via the various // methods (see examples below). // -// Using this class to iterate over a string is convenient but it is much slower -// than using this package's [Step] or [StepString] functions or any of the -// other specialized functions starting with "First". +// This class basically wraps the [StepString] parser and provides a convenient +// interface to it. If you are only interested in some parts of this package's +// functionality, using the specialized functions starting with "First" is +// almost always faster. type Graphemes struct { // The original string. original string @@ -222,7 +223,7 @@ func FirstGraphemeCluster(b []byte, state int) (cluster, rest []byte, width, new if len(b) <= length { // If we're already past the end, there is nothing else to parse. var prop int if state < 0 { - prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) } else { prop = state >> shiftGraphemePropState } @@ -252,16 +253,14 @@ func FirstGraphemeCluster(b []byte, state int) (cluster, rest []byte, width, new return b[:length], b[length:], width, state | (prop << shiftGraphemePropState) } - if r == vs16 { - width = 2 - } else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL { - width += runeWidth(r, prop) - } else if firstProp == prExtendedPictographic { + if firstProp == prExtendedPictographic { if r == vs15 { width = 1 - } else { + } else if r == vs16 { width = 2 } + } else if firstProp != prRegionalIndicator && firstProp != prL { + width += runeWidth(r, prop) } length += l @@ -284,7 +283,7 @@ func FirstGraphemeClusterInString(str string, state int) (cluster, rest string, if len(str) <= length { // If we're already past the end, there is nothing else to parse. var prop int if state < 0 { - prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) } else { prop = state >> shiftGraphemePropState } @@ -314,16 +313,14 @@ func FirstGraphemeClusterInString(str string, state int) (cluster, rest string, return str[:length], str[length:], width, state | (prop << shiftGraphemePropState) } - if r == vs16 { - width = 2 - } else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL { - width += runeWidth(r, prop) - } else if firstProp == prExtendedPictographic { + if firstProp == prExtendedPictographic { if r == vs15 { width = 1 - } else { + } else if r == vs16 { width = 2 } + } else if firstProp != prRegionalIndicator && firstProp != prL { + width += runeWidth(r, prop) } length += l diff --git a/vendor/github.com/rivo/uniseg/graphemeproperties.go b/vendor/github.com/rivo/uniseg/graphemeproperties.go index a87d140bf..0aff4a619 100644 --- a/vendor/github.com/rivo/uniseg/graphemeproperties.go +++ b/vendor/github.com/rivo/uniseg/graphemeproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. 
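The reordered width branch in FirstGraphemeCluster and FirstGraphemeClusterInString above now consults variation selectors only when the cluster opens with an Extended_Pictographic rune: VS15 (U+FE0E) forces text presentation (width 1), VS16 (U+FE0F) forces emoji presentation (width 2), and any other base keeps its table-derived width. A simplified sketch of that decision, with isPictographic and baseWidth standing in for the real property and width lookups:

    package main

    import "fmt"

    const (
        vs15 = 0xFE0E // VARIATION SELECTOR-15: request text presentation
        vs16 = 0xFE0F // VARIATION SELECTOR-16: request emoji presentation
    )

    // clusterWidth mirrors the reordered branch: a variation selector only
    // overrides the width when the cluster's first rune is Extended_Pictographic.
    func clusterWidth(selector rune, isPictographic bool, baseWidth int) int {
        if isPictographic {
            switch selector {
            case vs15:
                return 1 // forced text presentation
            case vs16:
                return 2 // forced emoji presentation
            }
        }
        return baseWidth // otherwise the table-derived width stands
    }

    func main() {
        fmt.Println(clusterWidth(vs16, true, 1))  // 2: emoji presentation
        fmt.Println(clusterWidth(vs15, true, 2))  // 1: text presentation
        fmt.Println(clusterWidth(vs16, false, 1)) // 1: selector has no effect
    }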
+package uniseg + // graphemeCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +// https://www.unicode.org/Public/15.0.0/ucd/auxiliary/GraphemeBreakProperty.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var graphemeCodePoints = [][3]int{ {0x0000, 0x0009, prControl}, // Cc [10] .. @@ -143,6 +143,7 @@ var graphemeCodePoints = [][3]int{ {0x0CCC, 0x0CCD, prExtend}, // Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA {0x0CD5, 0x0CD6, prExtend}, // Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL + {0x0CF3, 0x0CF3, prSpacingMark}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prSpacingMark}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D3B, 0x0D3C, prExtend}, // Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA @@ -172,7 +173,7 @@ var graphemeCodePoints = [][3]int{ {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN {0x0EB3, 0x0EB3, prSpacingMark}, // Lo LAO VOWEL SIGN AM {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO - {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prExtend}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS {0x0F35, 0x0F35, prExtend}, // Mn TIBETAN MARK NGAS BZUNG NYI ZLA {0x0F37, 0x0F37, prExtend}, // Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS @@ -1336,6 +1337,7 @@ var graphemeCodePoints = [][3]int{ {0x10AE5, 0x10AE6, prExtend}, // Mn [2] MANICHAEAN ABBREVIATION MARK ABOVE..MANICHAEAN ABBREVIATION MARK BELOW {0x10D24, 0x10D27, prExtend}, // Mn [4] HANIFI ROHINGYA SIGN HARBAHAY..HANIFI ROHINGYA SIGN TASSI {0x10EAB, 0x10EAC, prExtend}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK + {0x10EFD, 0x10EFF, prExtend}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F46, 0x10F50, prExtend}, // Mn [11] SOGDIAN COMBINING DOT BELOW..SOGDIAN COMBINING STROKE BELOW {0x10F82, 0x10F85, prExtend}, // Mn [4] OLD UYGHUR COMBINING DOT ABOVE..OLD UYGHUR COMBINING TWO DOTS BELOW {0x11000, 0x11000, prSpacingMark}, // Mc BRAHMI SIGN CANDRABINDU @@ -1375,6 +1377,7 @@ var graphemeCodePoints = [][3]int{ {0x11235, 0x11235, prSpacingMark}, // Mc KHOJKI SIGN VIRAMA {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x11241, 0x11241, prExtend}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x112DF, 0x112DF, prExtend}, // Mn KHUDAWADI SIGN ANUSVARA {0x112E0, 0x112E2, prSpacingMark}, // Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAWADI VOWEL SIGN II {0x112E3, 0x112EA, prExtend}, // Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWADI SIGN VIRAMA @@ -1494,7 +1497,18 @@ var graphemeCodePoints = [][3]int{ {0x11D97, 0x11D97, prExtend}, // Mn GUNJALA GONDI VIRAMA {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prSpacingMark}, // Mc [2] MAKASAR VOWEL SIGN 
E..MAKASAR VOWEL SIGN O - {0x13430, 0x13438, prControl}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x11F00, 0x11F01, prExtend}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prPrepend}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prSpacingMark}, // Mc KAWI SIGN VISARGA + {0x11F34, 0x11F35, prSpacingMark}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prExtend}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prSpacingMark}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prExtend}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prSpacingMark}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prExtend}, // Mn KAWI CONJOINER + {0x13430, 0x1343F, prControl}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prExtend}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13447, 0x13455, prExtend}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x16AF0, 0x16AF4, prExtend}, // Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE {0x16B30, 0x16B36, prExtend}, // Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM {0x16F4F, 0x16F4F, prExtend}, // Mn MIAO SIGN CONSONANT MODIFIER BAR @@ -1527,9 +1541,11 @@ var graphemeCodePoints = [][3]int{ {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E08F, 0x1E08F, prExtend}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E2AE, 0x1E2AE, prExtend}, // Mn TOTO SIGN RISING TONE {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI + {0x1E4EC, 0x1E4EF, prExtend}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH {0x1E8D0, 0x1E8D6, prExtend}, // Mn [7] MENDE KIKAKUI COMBINING NUMBER TEENS..MENDE KIKAKUI COMBINING NUMBER MILLIONS {0x1E944, 0x1E94A, prExtend}, // Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA {0x1F000, 0x1F003, prExtendedPictographic}, // E0.0 [4] (🀀..🀃) MAHJONG TILE EAST WIND..MAHJONG TILE NORTH WIND @@ -1780,7 +1796,8 @@ var graphemeCodePoints = [][3]int{ {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // E0.0 [2] (🛓..🛔) STUPA..PAGODA {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // E12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6D7, prExtendedPictographic}, // E13.0 [2] (🛖..🛗) hut..elevator - {0x1F6D8, 0x1F6DC, prExtendedPictographic}, // E0.0 [5] (🛘..🛜) .. + {0x1F6D8, 0x1F6DB, prExtendedPictographic}, // E0.0 [4] (🛘..🛛) .. 
+ {0x1F6DC, 0x1F6DC, prExtendedPictographic}, // E15.0 [1] (🛜) wireless {0x1F6DD, 0x1F6DF, prExtendedPictographic}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy {0x1F6E0, 0x1F6E5, prExtendedPictographic}, // E0.7 [6] (🛠️..🛥️) hammer and wrench..motor boat {0x1F6E6, 0x1F6E8, prExtendedPictographic}, // E0.0 [3] (🛦..🛨) UP-POINTING MILITARY AIRPLANE..UP-POINTING SMALL AIRPLANE @@ -1797,7 +1814,7 @@ var graphemeCodePoints = [][3]int{ {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // E12.0 [1] (🛺) auto rickshaw {0x1F6FB, 0x1F6FC, prExtendedPictographic}, // E13.0 [2] (🛻..🛼) pickup truck..roller skate {0x1F6FD, 0x1F6FF, prExtendedPictographic}, // E0.0 [3] (🛽..🛿) .. - {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) .. + {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) LOT OF FORTUNE..ORCUS {0x1F7D5, 0x1F7DF, prExtendedPictographic}, // E0.0 [11] (🟕..🟟) CIRCLED TRIANGLE.. {0x1F7E0, 0x1F7EB, prExtendedPictographic}, // E12.0 [12] (🟠..🟫) orange circle..brown square {0x1F7EC, 0x1F7EF, prExtendedPictographic}, // E0.0 [4] (🟬..🟯) .. @@ -1856,30 +1873,37 @@ var graphemeCodePoints = [][3]int{ {0x1FA00, 0x1FA6F, prExtendedPictographic}, // E0.0 [112] (🨀..🩯) NEUTRAL CHESS KING.. {0x1FA70, 0x1FA73, prExtendedPictographic}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA74, prExtendedPictographic}, // E13.0 [1] (🩴) thong sandal - {0x1FA75, 0x1FA77, prExtendedPictographic}, // E0.0 [3] (🩵..🩷) .. + {0x1FA75, 0x1FA77, prExtendedPictographic}, // E15.0 [3] (🩵..🩷) light blue heart..pink heart {0x1FA78, 0x1FA7A, prExtendedPictographic}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7C, prExtendedPictographic}, // E14.0 [2] (🩻..🩼) x-ray..crutch {0x1FA7D, 0x1FA7F, prExtendedPictographic}, // E0.0 [3] (🩽..🩿) .. {0x1FA80, 0x1FA82, prExtendedPictographic}, // E12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA86, prExtendedPictographic}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls - {0x1FA87, 0x1FA8F, prExtendedPictographic}, // E0.0 [9] (🪇..🪏) .. + {0x1FA87, 0x1FA88, prExtendedPictographic}, // E15.0 [2] (🪇..🪈) maracas..flute + {0x1FA89, 0x1FA8F, prExtendedPictographic}, // E0.0 [7] (🪉..🪏) .. {0x1FA90, 0x1FA95, prExtendedPictographic}, // E12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FAA8, prExtendedPictographic}, // E13.0 [19] (🪖..🪨) military helmet..rock {0x1FAA9, 0x1FAAC, prExtendedPictographic}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa - {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E0.0 [3] (🪭..🪯) .. + {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E15.0 [3] (🪭..🪯) folding hand fan..khanda {0x1FAB0, 0x1FAB6, prExtendedPictographic}, // E13.0 [7] (🪰..🪶) fly..feather {0x1FAB7, 0x1FABA, prExtendedPictographic}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs - {0x1FABB, 0x1FABF, prExtendedPictographic}, // E0.0 [5] (🪻..🪿) .. + {0x1FABB, 0x1FABD, prExtendedPictographic}, // E15.0 [3] (🪻..🪽) hyacinth..wing + {0x1FABE, 0x1FABE, prExtendedPictographic}, // E0.0 [1] (🪾) + {0x1FABF, 0x1FABF, prExtendedPictographic}, // E15.0 [1] (🪿) goose {0x1FAC0, 0x1FAC2, prExtendedPictographic}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging {0x1FAC3, 0x1FAC5, prExtendedPictographic}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown - {0x1FAC6, 0x1FACF, prExtendedPictographic}, // E0.0 [10] (🫆..🫏) .. + {0x1FAC6, 0x1FACD, prExtendedPictographic}, // E0.0 [8] (🫆..🫍) .. 
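The Emoji 15.0 rows added above (wireless, goose, moose, shaking face, and friends) flow straight through to the package's public API, so no caller changes are needed. A quick check against the updated tables; the cell widths assume a renderer that honors wide emoji presentation:

    package main

    import (
        "fmt"

        "github.com/rivo/uniseg"
    )

    func main() {
        s := "🫨🪿" // U+1FAE8 SHAKING FACE, U+1FABF GOOSE (both new in 15.0)
        fmt.Println(uniseg.GraphemeClusterCount(s)) // 2: one cluster per emoji
        fmt.Println(uniseg.StringWidth(s))          // 4: each is 2 cells wide
    }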
+ {0x1FACE, 0x1FACF, prExtendedPictographic}, // E15.0 [2] (🫎..🫏) moose..donkey {0x1FAD0, 0x1FAD6, prExtendedPictographic}, // E13.0 [7] (🫐..🫖) blueberries..teapot {0x1FAD7, 0x1FAD9, prExtendedPictographic}, // E14.0 [3] (🫗..🫙) pouring liquid..jar - {0x1FADA, 0x1FADF, prExtendedPictographic}, // E0.0 [6] (🫚..🫟) .. + {0x1FADA, 0x1FADB, prExtendedPictographic}, // E15.0 [2] (🫚..🫛) ginger root..pea pod + {0x1FADC, 0x1FADF, prExtendedPictographic}, // E0.0 [4] (🫜..🫟) .. {0x1FAE0, 0x1FAE7, prExtendedPictographic}, // E14.0 [8] (🫠..🫧) melting face..bubbles - {0x1FAE8, 0x1FAEF, prExtendedPictographic}, // E0.0 [8] (🫨..🫯) .. + {0x1FAE8, 0x1FAE8, prExtendedPictographic}, // E15.0 [1] (🫨) shaking face + {0x1FAE9, 0x1FAEF, prExtendedPictographic}, // E0.0 [7] (🫩..🫯) .. {0x1FAF0, 0x1FAF6, prExtendedPictographic}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands - {0x1FAF7, 0x1FAFF, prExtendedPictographic}, // E0.0 [9] (🫷..🫿) .. + {0x1FAF7, 0x1FAF8, prExtendedPictographic}, // E15.0 [2] (🫷..🫸) leftwards pushing hand..rightwards pushing hand + {0x1FAF9, 0x1FAFF, prExtendedPictographic}, // E0.0 [7] (🫹..🫿) .. {0x1FC00, 0x1FFFD, prExtendedPictographic}, // E0.0[1022] (🰀..🿽) .. {0xE0000, 0xE0000, prControl}, // Cn {0xE0001, 0xE0001, prControl}, // Cf LANGUAGE TAG diff --git a/vendor/github.com/rivo/uniseg/graphemerules.go b/vendor/github.com/rivo/uniseg/graphemerules.go index 9f46b575b..5d399d29c 100644 --- a/vendor/github.com/rivo/uniseg/graphemerules.go +++ b/vendor/github.com/rivo/uniseg/graphemerules.go @@ -21,11 +21,12 @@ const ( grBoundary ) -// The grapheme cluster parser's state transitions. Maps (state, property) to -// (new state, breaking instruction, rule number). The breaking instruction -// always refers to the boundary between the last and next code point. +// grTransitions implements the grapheme cluster parser's state transitions. +// Maps state and property to a new state, a breaking instruction, and rule +// number. The breaking instruction always refers to the boundary between the +// last and next code point. Returns negative values if no transition is found. // -// This map is queried as follows: +// This function is used as follows: // // 1. Find specific state + specific property. Stop if found. // 2. Find specific state + any property. @@ -36,59 +37,96 @@ const ( // are equal. Stop. // 6. Assume grAny and grBoundary. // -// Unicode version 14.0.0. -var grTransitions = map[[2]int][3]int{ +// Unicode version 15.0.0. +func grTransitions(state, prop int) (newState int, newProp int, boundary int) { + // It turns out that using a big switch statement is much faster than using + // a map. + + switch uint64(state) | uint64(prop)<<32 { // GB5 - {grAny, prCR}: {grCR, grBoundary, 50}, - {grAny, prLF}: {grControlLF, grBoundary, 50}, - {grAny, prControl}: {grControlLF, grBoundary, 50}, + case grAny | prCR<<32: + return grCR, grBoundary, 50 + case grAny | prLF<<32: + return grControlLF, grBoundary, 50 + case grAny | prControl<<32: + return grControlLF, grBoundary, 50 // GB4 - {grCR, prAny}: {grAny, grBoundary, 40}, - {grControlLF, prAny}: {grAny, grBoundary, 40}, - - // GB3. - {grCR, prLF}: {grControlLF, grNoBoundary, 30}, - - // GB6. - {grAny, prL}: {grL, grBoundary, 9990}, - {grL, prL}: {grL, grNoBoundary, 60}, - {grL, prV}: {grLVV, grNoBoundary, 60}, - {grL, prLV}: {grLVV, grNoBoundary, 60}, - {grL, prLVT}: {grLVTT, grNoBoundary, 60}, - - // GB7. 
- {grAny, prLV}: {grLVV, grBoundary, 9990}, - {grAny, prV}: {grLVV, grBoundary, 9990}, - {grLVV, prV}: {grLVV, grNoBoundary, 70}, - {grLVV, prT}: {grLVTT, grNoBoundary, 70}, - - // GB8. - {grAny, prLVT}: {grLVTT, grBoundary, 9990}, - {grAny, prT}: {grLVTT, grBoundary, 9990}, - {grLVTT, prT}: {grLVTT, grNoBoundary, 80}, - - // GB9. - {grAny, prExtend}: {grAny, grNoBoundary, 90}, - {grAny, prZWJ}: {grAny, grNoBoundary, 90}, - - // GB9a. - {grAny, prSpacingMark}: {grAny, grNoBoundary, 91}, - - // GB9b. - {grAny, prPrepend}: {grPrepend, grBoundary, 9990}, - {grPrepend, prAny}: {grAny, grNoBoundary, 92}, - - // GB11. - {grAny, prExtendedPictographic}: {grExtendedPictographic, grBoundary, 9990}, - {grExtendedPictographic, prExtend}: {grExtendedPictographic, grNoBoundary, 110}, - {grExtendedPictographic, prZWJ}: {grExtendedPictographicZWJ, grNoBoundary, 110}, - {grExtendedPictographicZWJ, prExtendedPictographic}: {grExtendedPictographic, grNoBoundary, 110}, - - // GB12 / GB13. - {grAny, prRegionalIndicator}: {grRIOdd, grBoundary, 9990}, - {grRIOdd, prRegionalIndicator}: {grRIEven, grNoBoundary, 120}, - {grRIEven, prRegionalIndicator}: {grRIOdd, grBoundary, 120}, + case grCR | prAny<<32: + return grAny, grBoundary, 40 + case grControlLF | prAny<<32: + return grAny, grBoundary, 40 + + // GB3 + case grCR | prLF<<32: + return grControlLF, grNoBoundary, 30 + + // GB6 + case grAny | prL<<32: + return grL, grBoundary, 9990 + case grL | prL<<32: + return grL, grNoBoundary, 60 + case grL | prV<<32: + return grLVV, grNoBoundary, 60 + case grL | prLV<<32: + return grLVV, grNoBoundary, 60 + case grL | prLVT<<32: + return grLVTT, grNoBoundary, 60 + + // GB7 + case grAny | prLV<<32: + return grLVV, grBoundary, 9990 + case grAny | prV<<32: + return grLVV, grBoundary, 9990 + case grLVV | prV<<32: + return grLVV, grNoBoundary, 70 + case grLVV | prT<<32: + return grLVTT, grNoBoundary, 70 + + // GB8 + case grAny | prLVT<<32: + return grLVTT, grBoundary, 9990 + case grAny | prT<<32: + return grLVTT, grBoundary, 9990 + case grLVTT | prT<<32: + return grLVTT, grNoBoundary, 80 + + // GB9 + case grAny | prExtend<<32: + return grAny, grNoBoundary, 90 + case grAny | prZWJ<<32: + return grAny, grNoBoundary, 90 + + // GB9a + case grAny | prSpacingMark<<32: + return grAny, grNoBoundary, 91 + + // GB9b + case grAny | prPrepend<<32: + return grPrepend, grBoundary, 9990 + case grPrepend | prAny<<32: + return grAny, grNoBoundary, 92 + + // GB11 + case grAny | prExtendedPictographic<<32: + return grExtendedPictographic, grBoundary, 9990 + case grExtendedPictographic | prExtend<<32: + return grExtendedPictographic, grNoBoundary, 110 + case grExtendedPictographic | prZWJ<<32: + return grExtendedPictographicZWJ, grNoBoundary, 110 + case grExtendedPictographicZWJ | prExtendedPictographic<<32: + return grExtendedPictographic, grNoBoundary, 110 + + // GB12 / GB13 + case grAny | prRegionalIndicator<<32: + return grRIOdd, grBoundary, 9990 + case grRIOdd | prRegionalIndicator<<32: + return grRIEven, grNoBoundary, 120 + case grRIEven | prRegionalIndicator<<32: + return grRIOdd, grBoundary, 120 + default: + return -1, -1, -1 + } } // transitionGraphemeState determines the new state of the grapheme cluster @@ -97,40 +135,40 @@ var grTransitions = map[[2]int][3]int{ // table) and whether a cluster boundary was detected. func transitionGraphemeState(state int, r rune) (newState, prop int, boundary bool) { // Determine the property of the next character. 
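The map-to-switch rewrite above hinges on packing the (state, property) pair into one integer key, uint64(state) | uint64(prop)<<32, so each lookup is a constant switch the compiler can turn into a jump table or binary search instead of a map access with hashing and bounds checks. A toy reproduction of the pattern, with made-up states and properties rather than the generated constants:

    package main

    import "fmt"

    // Toy parser states and properties; the real generated constants differ.
    const (
        stAny = iota
        stCR
    )
    const (
        propAny = iota
        propCR
        propLF
    )

    // transition dispatches on a single packed uint64 key.
    func transition(state, prop int) (newState int, boundary bool) {
        switch uint64(state) | uint64(prop)<<32 {
        case stAny | propCR<<32:
            return stCR, true // a CR opens a new cluster
        case stCR | propLF<<32:
            return stAny, false // CR x LF: never break inside a CRLF pair
        default:
            return -1, true // no specific rule; the caller retries with wildcards
        }
    }

    func main() {
        fmt.Println(transition(stCR, propLF)) // 0 false
    }

The fallback order described in the grTransitions doc comment (specific state + specific property, then specific state + any property, then any state + specific property) is realized by the negative sentinel returns plus the retries in transitionGraphemeState.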
- prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) // Find the applicable transition. - transition, ok := grTransitions[[2]int{state, prop}] - if ok { + nextState, nextProp, _ := grTransitions(state, prop) + if nextState >= 0 { // We have a specific transition. We'll use it. - return transition[0], prop, transition[1] == grBoundary + return nextState, prop, nextProp == grBoundary } // No specific transition found. Try the less specific ones. - transAnyProp, okAnyProp := grTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := grTransitions[[2]int{grAny, prop}] - if okAnyProp && okAnyState { + anyPropState, anyPropProp, anyPropRule := grTransitions(state, prAny) + anyStateState, anyStateProp, anyStateRule := grTransitions(grAny, prop) + if anyPropState >= 0 && anyStateState >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState = transAnyState[0] - boundary = transAnyState[1] == grBoundary - if transAnyProp[2] < transAnyState[2] { - boundary = transAnyProp[1] == grBoundary + newState = anyStateState + boundary = anyStateProp == grBoundary + if anyPropRule < anyStateRule { + boundary = anyPropProp == grBoundary } return } - if okAnyProp { + if anyPropState >= 0 { // We only have a specific state. - return transAnyProp[0], prop, transAnyProp[1] == grBoundary + return anyPropState, prop, anyPropProp == grBoundary // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. } - if okAnyState { + if anyStateState >= 0 { // We only have a specific property. - return transAnyState[0], prop, transAnyState[1] == grBoundary + return anyStateState, prop, anyStateProp == grBoundary } // No known transition. GB999: Any ÷ Any. diff --git a/vendor/github.com/rivo/uniseg/line.go b/vendor/github.com/rivo/uniseg/line.go index 87f28503f..7a46318d9 100644 --- a/vendor/github.com/rivo/uniseg/line.go +++ b/vendor/github.com/rivo/uniseg/line.go @@ -80,7 +80,7 @@ func FirstLineSegment(b []byte, state int) (segment, rest []byte, mustBreak bool } } -// FirstLineSegmentInString is like FirstLineSegment() but its input and outputs +// FirstLineSegmentInString is like [FirstLineSegment] but its input and outputs // are strings. func FirstLineSegmentInString(str string, state int) (segment, rest string, mustBreak bool, newState int) { // An empty byte slice returns nothing. @@ -122,13 +122,13 @@ func FirstLineSegmentInString(str string, state int) (segment, rest string, must // [UAX #14]: https://www.unicode.org/reports/tr14/#Algorithm func HasTrailingLineBreak(b []byte) bool { r, _ := utf8.DecodeLastRune(b) - property, _ := propertyWithGenCat(lineBreakCodePoints, r) - return property == lbBK || property == lbCR || property == lbLF || property == lbNL + property, _ := propertyLineBreak(r) + return property == prBK || property == prCR || property == prLF || property == prNL } // HasTrailingLineBreakInString is like [HasTrailingLineBreak] but for a string. 
func HasTrailingLineBreakInString(str string) bool { r, _ := utf8.DecodeLastRuneInString(str) - property, _ := propertyWithGenCat(lineBreakCodePoints, r) - return property == lbBK || property == lbCR || property == lbLF || property == lbNL + property, _ := propertyLineBreak(r) + return property == prBK || property == prCR || property == prLF || property == prNL } diff --git a/vendor/github.com/rivo/uniseg/lineproperties.go b/vendor/github.com/rivo/uniseg/lineproperties.go index 32169306e..ac7fac4c0 100644 --- a/vendor/github.com/rivo/uniseg/lineproperties.go +++ b/vendor/github.com/rivo/uniseg/lineproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // lineBreakCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/LineBreak.txt +// https://www.unicode.org/Public/15.0.0/ucd/LineBreak.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var lineBreakCodePoints = [][4]int{ {0x0000, 0x0008, prCM, gcCc}, // [9] .. @@ -439,6 +439,7 @@ var lineBreakCodePoints = [][4]int{ {0x0CE2, 0x0CE3, prCM, gcMn}, // [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prNU, gcNd}, // [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prAL, gcLo}, // [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prCM, gcMc}, // KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prCM, gcMn}, // [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prCM, gcMc}, // [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prAL, gcLo}, // [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -500,7 +501,7 @@ var lineBreakCodePoints = [][4]int{ {0x0EBD, 0x0EBD, prSA, gcLo}, // LAO SEMIVOWEL SIGN NYO {0x0EC0, 0x0EC4, prSA, gcLo}, // [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI {0x0EC6, 0x0EC6, prSA, gcLm}, // LAO KO LA - {0x0EC8, 0x0ECD, prSA, gcMn}, // [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prSA, gcMn}, // [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prNU, gcNd}, // [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0EDC, 0x0EDF, prSA, gcLo}, // [4] LAO HO NO..LAO LETTER KHMU NYO {0x0F00, 0x0F00, prAL, gcLo}, // TIBETAN SYLLABLE OM @@ -813,7 +814,11 @@ var lineBreakCodePoints = [][4]int{ {0x1D79, 0x1D7F, prAL, gcLl}, // [7] LATIN SMALL LETTER INSULAR G..LATIN SMALL LETTER UPSILON WITH STROKE {0x1D80, 0x1D9A, prAL, gcLl}, // [27] LATIN SMALL LETTER B WITH PALATAL HOOK..LATIN SMALL LETTER EZH WITH RETROFLEX HOOK {0x1D9B, 0x1DBF, prAL, gcLm}, // [37] MODIFIER LETTER SMALL TURNED ALPHA..MODIFIER LETTER SMALL THETA - {0x1DC0, 0x1DFF, prCM, gcMn}, // [64] COMBINING DOTTED GRAVE ACCENT..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW + {0x1DC0, 0x1DCC, prCM, gcMn}, // [13] COMBINING DOTTED GRAVE ACCENT..COMBINING MACRON-BREVE + {0x1DCD, 0x1DCD, prGL, gcMn}, // COMBINING DOUBLE CIRCUMFLEX ABOVE + {0x1DCE, 0x1DFB, prCM, gcMn}, // [46] COMBINING OGONEK ABOVE..COMBINING DELETION MARK + {0x1DFC, 0x1DFC, prGL, gcMn}, // COMBINING DOUBLE INVERTED BREVE BELOW + {0x1DFD, 0x1DFF, prCM, gcMn}, // [3] COMBINING ALMOST EQUAL TO BELOW..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW {0x1E00, 0x1EFF, prAL, 
gcLC}, // [256] LATIN CAPITAL LETTER A WITH RING BELOW..LATIN SMALL LETTER Y WITH LOOP {0x1F00, 0x1F15, prAL, gcLC}, // [22] GREEK SMALL LETTER ALPHA WITH PSILI..GREEK SMALL LETTER EPSILON WITH DASIA AND OXIA {0x1F18, 0x1F1D, prAL, gcLu}, // [6] GREEK CAPITAL LETTER EPSILON WITH PSILI..GREEK CAPITAL LETTER EPSILON WITH DASIA AND OXIA @@ -889,7 +894,7 @@ var lineBreakCodePoints = [][4]int{ {0x2054, 0x2054, prAL, gcPc}, // INVERTED UNDERTIE {0x2055, 0x2055, prAL, gcPo}, // FLOWER PUNCTUATION MARK {0x2056, 0x2056, prBA, gcPo}, // THREE DOT PUNCTUATION - {0x2057, 0x2057, prAL, gcPo}, // QUADRUPLE PRIME + {0x2057, 0x2057, prPO, gcPo}, // QUADRUPLE PRIME {0x2058, 0x205B, prBA, gcPo}, // [4] FOUR DOT PUNCTUATION..FOUR DOT MARK {0x205C, 0x205C, prAL, gcPo}, // DOTTED CROSS {0x205D, 0x205E, prBA, gcPo}, // [2] TRICOLON..VERTICAL FOUR DOTS @@ -2751,6 +2756,7 @@ var lineBreakCodePoints = [][4]int{ {0x10EAB, 0x10EAC, prCM, gcMn}, // [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EAD, 0x10EAD, prBA, gcPd}, // YEZIDI HYPHENATION MARK {0x10EB0, 0x10EB1, prAL, gcLo}, // [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prCM, gcMn}, // [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prAL, gcLo}, // [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F1D, 0x10F26, prAL, gcNo}, // [10] OLD SOGDIAN NUMBER ONE..OLD SOGDIAN FRACTION ONE HALF {0x10F27, 0x10F27, prAL, gcLo}, // OLD SOGDIAN LIGATURE AYIN-DALETH @@ -2840,6 +2846,8 @@ var lineBreakCodePoints = [][4]int{ {0x1123B, 0x1123C, prBA, gcPo}, // [2] KHOJKI SECTION MARK..KHOJKI DOUBLE SECTION MARK {0x1123D, 0x1123D, prAL, gcPo}, // KHOJKI ABBREVIATION SIGN {0x1123E, 0x1123E, prCM, gcMn}, // KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prAL, gcLo}, // [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prCM, gcMn}, // KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prAL, gcLo}, // [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prAL, gcLo}, // MULTANI LETTER GHA {0x1128A, 0x1128D, prAL, gcLo}, // [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -3013,6 +3021,7 @@ var lineBreakCodePoints = [][4]int{ {0x11AA1, 0x11AA2, prBA, gcPo}, // [2] SOYOMBO TERMINAL MARK-1..SOYOMBO TERMINAL MARK-2 {0x11AB0, 0x11ABF, prAL, gcLo}, // [16] CANADIAN SYLLABICS NATTILIK HI..CANADIAN SYLLABICS SPA {0x11AC0, 0x11AF8, prAL, gcLo}, // [57] PAU CIN HAU LETTER PA..PAU CIN HAU GLOTTAL STOP FINAL + {0x11B00, 0x11B09, prBB, gcPo}, // [10] DEVANAGARI HEAD MARK..DEVANAGARI SIGN MINDU {0x11C00, 0x11C08, prAL, gcLo}, // [9] BHAIKSUKI LETTER A..BHAIKSUKI LETTER VOCALIC L {0x11C0A, 0x11C2E, prAL, gcLo}, // [37] BHAIKSUKI LETTER E..BHAIKSUKI LETTER HA {0x11C2F, 0x11C2F, prCM, gcMc}, // BHAIKSUKI VOWEL SIGN AA @@ -3059,6 +3068,20 @@ var lineBreakCodePoints = [][4]int{ {0x11EF3, 0x11EF4, prCM, gcMn}, // [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prCM, gcMc}, // [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O {0x11EF7, 0x11EF8, prAL, gcPo}, // [2] MAKASAR PASSIMBANG..MAKASAR END OF SECTION + {0x11F00, 0x11F01, prCM, gcMn}, // [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prAL, gcLo}, // KAWI SIGN REPHA + {0x11F03, 0x11F03, prCM, gcMc}, // KAWI SIGN VISARGA + {0x11F04, 0x11F10, prAL, gcLo}, // [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prAL, gcLo}, // [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prCM, gcMc}, // [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, 
prCM, gcMn}, // [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prCM, gcMc}, // [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prCM, gcMn}, // KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prCM, gcMc}, // KAWI SIGN KILLER + {0x11F42, 0x11F42, prCM, gcMn}, // KAWI CONJOINER + {0x11F43, 0x11F44, prBA, gcPo}, // [2] KAWI DANDA..KAWI DOUBLE DANDA + {0x11F45, 0x11F4F, prID, gcPo}, // [11] KAWI PUNCTUATION SECTION MARKER..KAWI PUNCTUATION CLOSING SPIRAL + {0x11F50, 0x11F59, prNU, gcNd}, // [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prAL, gcLo}, // LISU LETTER YHA {0x11FC0, 0x11FD4, prAL, gcNo}, // [21] TAMIL FRACTION ONE THREE-HUNDRED-AND-TWENTIETH..TAMIL FRACTION DOWNSCALING FACTOR KIIZH {0x11FD5, 0x11FDC, prAL, gcSo}, // [8] TAMIL SIGN NEL..TAMIL SIGN MUKKURUNI @@ -3084,10 +3107,18 @@ var lineBreakCodePoints = [][4]int{ {0x1328A, 0x13378, prAL, gcLo}, // [239] EGYPTIAN HIEROGLYPH O037..EGYPTIAN HIEROGLYPH V011 {0x13379, 0x13379, prOP, gcLo}, // EGYPTIAN HIEROGLYPH V011A {0x1337A, 0x1337B, prCL, gcLo}, // [2] EGYPTIAN HIEROGLYPH V011B..EGYPTIAN HIEROGLYPH V011C - {0x1337C, 0x1342E, prAL, gcLo}, // [179] EGYPTIAN HIEROGLYPH V012..EGYPTIAN HIEROGLYPH AA032 + {0x1337C, 0x1342F, prAL, gcLo}, // [180] EGYPTIAN HIEROGLYPH V012..EGYPTIAN HIEROGLYPH V011D {0x13430, 0x13436, prGL, gcCf}, // [7] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH OVERLAY MIDDLE {0x13437, 0x13437, prOP, gcCf}, // EGYPTIAN HIEROGLYPH BEGIN SEGMENT {0x13438, 0x13438, prCL, gcCf}, // EGYPTIAN HIEROGLYPH END SEGMENT + {0x13439, 0x1343B, prGL, gcCf}, // [3] EGYPTIAN HIEROGLYPH INSERT AT MIDDLE..EGYPTIAN HIEROGLYPH INSERT AT BOTTOM + {0x1343C, 0x1343C, prOP, gcCf}, // EGYPTIAN HIEROGLYPH BEGIN ENCLOSURE + {0x1343D, 0x1343D, prCL, gcCf}, // EGYPTIAN HIEROGLYPH END ENCLOSURE + {0x1343E, 0x1343E, prOP, gcCf}, // EGYPTIAN HIEROGLYPH BEGIN WALLED ENCLOSURE + {0x1343F, 0x1343F, prCL, gcCf}, // EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prCM, gcMn}, // EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prAL, gcLo}, // [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE LOST SIGN + {0x13447, 0x13455, prCM, gcMn}, // [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x145CD, prAL, gcLo}, // [462] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A409 {0x145CE, 0x145CE, prOP, gcLo}, // ANATOLIAN HIEROGLYPH A410 BEGIN LOGOGRAM MARK {0x145CF, 0x145CF, prCL, gcLo}, // ANATOLIAN HIEROGLYPH A410A END LOGOGRAM MARK @@ -3137,7 +3168,9 @@ var lineBreakCodePoints = [][4]int{ {0x1AFFD, 0x1AFFE, prAL, gcLm}, // [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B0FF, prID, gcLo}, // [256] KATAKANA LETTER ARCHAIC E..HENTAIGANA LETTER RE-2 {0x1B100, 0x1B122, prID, gcLo}, // [35] HENTAIGANA LETTER RE-3..KATAKANA LETTER ARCHAIC WU + {0x1B132, 0x1B132, prCJ, gcLo}, // HIRAGANA LETTER SMALL KO {0x1B150, 0x1B152, prCJ, gcLo}, // [3] HIRAGANA LETTER SMALL WI..HIRAGANA LETTER SMALL WO + {0x1B155, 0x1B155, prCJ, gcLo}, // KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prCJ, gcLo}, // [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1B170, 0x1B2FB, prID, gcLo}, // [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB {0x1BC00, 0x1BC6A, prAL, gcLo}, // [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M @@ -3168,6 +3201,7 @@ var lineBreakCodePoints = [][4]int{ {0x1D200, 0x1D241, prAL, gcSo}, // [66] GREEK VOCAL NOTATION SYMBOL-1..GREEK INSTRUMENTAL NOTATION SYMBOL-54 
{0x1D242, 0x1D244, prCM, gcMn}, // [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME {0x1D245, 0x1D245, prAL, gcSo}, // GREEK MUSICAL LEIMMA + {0x1D2C0, 0x1D2D3, prAL, gcNo}, // [20] KAKTOVIK NUMERAL ZERO..KAKTOVIK NUMERAL NINETEEN {0x1D2E0, 0x1D2F3, prAL, gcNo}, // [20] MAYAN NUMERAL ZERO..MAYAN NUMERAL NINETEEN {0x1D300, 0x1D356, prAL, gcSo}, // [87] MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING {0x1D360, 0x1D378, prAL, gcNo}, // [25] COUNTING ROD UNIT DIGIT ONE..TALLY MARK FIVE @@ -3228,11 +3262,14 @@ var lineBreakCodePoints = [][4]int{ {0x1DF00, 0x1DF09, prAL, gcLl}, // [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prAL, gcLo}, // LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prAL, gcLl}, // [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prAL, gcLl}, // [6] LATIN SMALL LETTER D WITH MID-HEIGHT LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prCM, gcMn}, // [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prCM, gcMn}, // [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prCM, gcMn}, // [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prCM, gcMn}, // [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prCM, gcMn}, // [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prAL, gcLm}, // [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prCM, gcMn}, // COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prAL, gcLo}, // [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prCM, gcMn}, // [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prAL, gcLm}, // [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -3245,6 +3282,10 @@ var lineBreakCodePoints = [][4]int{ {0x1E2EC, 0x1E2EF, prCM, gcMn}, // [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prNU, gcNd}, // [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE {0x1E2FF, 0x1E2FF, prPR, gcSc}, // WANCHO NGUN SIGN + {0x1E4D0, 0x1E4EA, prAL, gcLo}, // [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prAL, gcLm}, // NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prCM, gcMn}, // [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prNU, gcNd}, // [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE {0x1E7E0, 0x1E7E6, prAL, gcLo}, // [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prAL, gcLo}, // [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prAL, gcLo}, // [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -3412,16 +3453,18 @@ var lineBreakCodePoints = [][4]int{ {0x1F6C1, 0x1F6CB, prID, gcSo}, // [11] BATHTUB..COUCH AND LAMP {0x1F6CC, 0x1F6CC, prEB, gcSo}, // SLEEPING ACCOMMODATION {0x1F6CD, 0x1F6D7, prID, gcSo}, // [11] SHOPPING BAGS..ELEVATOR - {0x1F6D8, 0x1F6DC, prID, gcCn}, // [5] .. - {0x1F6DD, 0x1F6EC, prID, gcSo}, // [16] PLAYGROUND SLIDE..AIRPLANE ARRIVING + {0x1F6D8, 0x1F6DB, prID, gcCn}, // [4] .. + {0x1F6DC, 0x1F6EC, prID, gcSo}, // [17] WIRELESS..AIRPLANE ARRIVING {0x1F6ED, 0x1F6EF, prID, gcCn}, // [3] .. 
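These LineBreak.txt-derived classes (prAL alphabetic, prNU numeric, prID ideographic, and so on) are what the public line-segmentation API consults; prID in particular is why breaks are permitted between CJK ideographs even without spaces. A small usage example against the real API:

    package main

    import (
        "fmt"

        "github.com/rivo/uniseg"
    )

    func main() {
        str := "Hello, 世界! How are you?"
        state := -1
        var segment string
        for len(str) > 0 {
            // The ignored bool reports a mandatory break after segment.
            segment, str, _, state = uniseg.FirstLineSegmentInString(str, state)
            fmt.Printf("%q\n", segment)
        }
    }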
{0x1F6F0, 0x1F6FC, prID, gcSo}, // [13] SATELLITE..ROLLER SKATE {0x1F6FD, 0x1F6FF, prID, gcCn}, // [3] .. {0x1F700, 0x1F773, prAL, gcSo}, // [116] ALCHEMICAL SYMBOL FOR QUINTESSENCE..ALCHEMICAL SYMBOL FOR HALF OUNCE - {0x1F774, 0x1F77F, prID, gcCn}, // [12] .. + {0x1F774, 0x1F776, prID, gcSo}, // [3] LOT OF FORTUNE..LUNAR ECLIPSE + {0x1F777, 0x1F77A, prID, gcCn}, // [4] .. + {0x1F77B, 0x1F77F, prID, gcSo}, // [5] HAUMEA..ORCUS {0x1F780, 0x1F7D4, prAL, gcSo}, // [85] BLACK LEFT-POINTING ISOSCELES RIGHT TRIANGLE..HEAVY TWELVE POINTED PINWHEEL STAR - {0x1F7D5, 0x1F7D8, prID, gcSo}, // [4] CIRCLED TRIANGLE..NEGATIVE CIRCLED SQUARE - {0x1F7D9, 0x1F7DF, prID, gcCn}, // [7] .. + {0x1F7D5, 0x1F7D9, prID, gcSo}, // [5] CIRCLED TRIANGLE..NINE POINTED WHITE STAR + {0x1F7DA, 0x1F7DF, prID, gcCn}, // [6] .. {0x1F7E0, 0x1F7EB, prID, gcSo}, // [12] LARGE ORANGE CIRCLE..LARGE BROWN SQUARE {0x1F7EC, 0x1F7EF, prID, gcCn}, // [4] .. {0x1F7F0, 0x1F7F0, prID, gcSo}, // HEAVY EQUALS SIGN @@ -3467,33 +3510,29 @@ var lineBreakCodePoints = [][4]int{ {0x1FA54, 0x1FA5F, prID, gcCn}, // [12] .. {0x1FA60, 0x1FA6D, prID, gcSo}, // [14] XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER {0x1FA6E, 0x1FA6F, prID, gcCn}, // [2] .. - {0x1FA70, 0x1FA74, prID, gcSo}, // [5] BALLET SHOES..THONG SANDAL - {0x1FA75, 0x1FA77, prID, gcCn}, // [3] .. - {0x1FA78, 0x1FA7C, prID, gcSo}, // [5] DROP OF BLOOD..CRUTCH + {0x1FA70, 0x1FA7C, prID, gcSo}, // [13] BALLET SHOES..CRUTCH {0x1FA7D, 0x1FA7F, prID, gcCn}, // [3] .. - {0x1FA80, 0x1FA86, prID, gcSo}, // [7] YO-YO..NESTING DOLLS - {0x1FA87, 0x1FA8F, prID, gcCn}, // [9] .. - {0x1FA90, 0x1FAAC, prID, gcSo}, // [29] RINGED PLANET..HAMSA - {0x1FAAD, 0x1FAAF, prID, gcCn}, // [3] .. - {0x1FAB0, 0x1FABA, prID, gcSo}, // [11] FLY..NEST WITH EGGS - {0x1FABB, 0x1FABF, prID, gcCn}, // [5] .. - {0x1FAC0, 0x1FAC2, prID, gcSo}, // [3] ANATOMICAL HEART..PEOPLE HUGGING + {0x1FA80, 0x1FA88, prID, gcSo}, // [9] YO-YO..FLUTE + {0x1FA89, 0x1FA8F, prID, gcCn}, // [7] .. + {0x1FA90, 0x1FABD, prID, gcSo}, // [46] RINGED PLANET..WING + {0x1FABE, 0x1FABE, prID, gcCn}, // + {0x1FABF, 0x1FAC2, prID, gcSo}, // [4] GOOSE..PEOPLE HUGGING {0x1FAC3, 0x1FAC5, prEB, gcSo}, // [3] PREGNANT MAN..PERSON WITH CROWN - {0x1FAC6, 0x1FACF, prID, gcCn}, // [10] .. - {0x1FAD0, 0x1FAD9, prID, gcSo}, // [10] BLUEBERRIES..JAR - {0x1FADA, 0x1FADF, prID, gcCn}, // [6] .. - {0x1FAE0, 0x1FAE7, prID, gcSo}, // [8] MELTING FACE..BUBBLES - {0x1FAE8, 0x1FAEF, prID, gcCn}, // [8] .. - {0x1FAF0, 0x1FAF6, prEB, gcSo}, // [7] HAND WITH INDEX FINGER AND THUMB CROSSED..HEART HANDS - {0x1FAF7, 0x1FAFF, prID, gcCn}, // [9] .. + {0x1FAC6, 0x1FACD, prID, gcCn}, // [8] .. + {0x1FACE, 0x1FADB, prID, gcSo}, // [14] MOOSE..PEA POD + {0x1FADC, 0x1FADF, prID, gcCn}, // [4] .. + {0x1FAE0, 0x1FAE8, prID, gcSo}, // [9] MELTING FACE..SHAKING FACE + {0x1FAE9, 0x1FAEF, prID, gcCn}, // [7] .. + {0x1FAF0, 0x1FAF8, prEB, gcSo}, // [9] HAND WITH INDEX FINGER AND THUMB CROSSED..RIGHTWARDS PUSHING HAND + {0x1FAF9, 0x1FAFF, prID, gcCn}, // [7] .. {0x1FB00, 0x1FB92, prAL, gcSo}, // [147] BLOCK SEXTANT-1..UPPER HALF INVERSE MEDIUM SHADE AND LOWER HALF BLOCK {0x1FB94, 0x1FBCA, prAL, gcSo}, // [55] LEFT HALF INVERSE MEDIUM SHADE AND RIGHT HALF BLOCK..WHITE UP-POINTING CHEVRON {0x1FBF0, 0x1FBF9, prNU, gcNd}, // [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x1FC00, 0x1FFFD, prID, gcCn}, // [1022] .. {0x20000, 0x2A6DF, prID, gcLo}, // [42720] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6DF {0x2A6E0, 0x2A6FF, prID, gcCn}, // [32] .. 
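The widened prEB (emoji modifier base) row above, now ending at RIGHTWARDS PUSHING HAND, keeps a base-plus-skin-tone sequence atomic for segmentation. For example (the width comment assumes a terminal that renders emoji two cells wide):

    package main

    import (
        "fmt"

        "github.com/rivo/uniseg"
    )

    func main() {
        s := "🫸🏿" // U+1FAF8 rightwards pushing hand + U+1F3FF skin tone modifier
        fmt.Println(uniseg.GraphemeClusterCount(s)) // 1: the modifier extends the base
        fmt.Println(uniseg.StringWidth(s))          // 2: one wide cluster
    }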
- {0x2A700, 0x2B738, prID, gcLo}, // [4153] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B738 - {0x2B739, 0x2B73F, prID, gcCn}, // [7] .. + {0x2A700, 0x2B739, prID, gcLo}, // [4154] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B739 + {0x2B73A, 0x2B73F, prID, gcCn}, // [6] .. {0x2B740, 0x2B81D, prID, gcLo}, // [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D {0x2B81E, 0x2B81F, prID, gcCn}, // [2] .. {0x2B820, 0x2CEA1, prID, gcLo}, // [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 @@ -3504,7 +3543,9 @@ var lineBreakCodePoints = [][4]int{ {0x2FA1E, 0x2FA1F, prID, gcCn}, // [2] .. {0x2FA20, 0x2FFFD, prID, gcCn}, // [1502] .. {0x30000, 0x3134A, prID, gcLo}, // [4939] CJK UNIFIED IDEOGRAPH-30000..CJK UNIFIED IDEOGRAPH-3134A - {0x3134B, 0x3FFFD, prID, gcCn}, // [60595] .. + {0x3134B, 0x3134F, prID, gcCn}, // [5] .. + {0x31350, 0x323AF, prID, gcLo}, // [4192] CJK UNIFIED IDEOGRAPH-31350..CJK UNIFIED IDEOGRAPH-323AF + {0x323B0, 0x3FFFD, prID, gcCn}, // [56398] .. {0xE0001, 0xE0001, prCM, gcCf}, // LANGUAGE TAG {0xE0020, 0xE007F, prCM, gcCf}, // [96] TAG SPACE..CANCEL TAG {0xE0100, 0xE01EF, prCM, gcMn}, // [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 diff --git a/vendor/github.com/rivo/uniseg/linerules.go b/vendor/github.com/rivo/uniseg/linerules.go index d2ad51680..7708ae0fb 100644 --- a/vendor/github.com/rivo/uniseg/linerules.go +++ b/vendor/github.com/rivo/uniseg/linerules.go @@ -64,222 +64,381 @@ const ( LineMustBreak // You must break the line here. ) -// The line break parser's state transitions. It's anologous to grTransitions, -// see comments there for details. Unicode version 14.0.0. -var lbTransitions = map[[2]int][3]int{ +// lbTransitions implements the line break parser's state transitions. It's +// anologous to [grTransitions], see comments there for details. +// +// Unicode version 15.0.0. +func lbTransitions(state, prop int) (newState, lineBreak, rule int) { + switch uint64(state) | uint64(prop)<<32 { // LB4. - {lbAny, prBK}: {lbBK, LineCanBreak, 310}, - {lbBK, prAny}: {lbAny, LineMustBreak, 40}, + case lbBK | prAny<<32: + return lbAny, LineMustBreak, 40 // LB5. - {lbAny, prCR}: {lbCR, LineCanBreak, 310}, - {lbAny, prLF}: {lbLF, LineCanBreak, 310}, - {lbAny, prNL}: {lbNL, LineCanBreak, 310}, - {lbCR, prLF}: {lbLF, LineDontBreak, 50}, - {lbCR, prAny}: {lbAny, LineMustBreak, 50}, - {lbLF, prAny}: {lbAny, LineMustBreak, 50}, - {lbNL, prAny}: {lbAny, LineMustBreak, 50}, + case lbCR | prLF<<32: + return lbLF, LineDontBreak, 50 + case lbCR | prAny<<32: + return lbAny, LineMustBreak, 50 + case lbLF | prAny<<32: + return lbAny, LineMustBreak, 50 + case lbNL | prAny<<32: + return lbAny, LineMustBreak, 50 // LB6. - {lbAny, prBK}: {lbBK, LineDontBreak, 60}, - {lbAny, prCR}: {lbCR, LineDontBreak, 60}, - {lbAny, prLF}: {lbLF, LineDontBreak, 60}, - {lbAny, prNL}: {lbNL, LineDontBreak, 60}, + case lbAny | prBK<<32: + return lbBK, LineDontBreak, 60 + case lbAny | prCR<<32: + return lbCR, LineDontBreak, 60 + case lbAny | prLF<<32: + return lbLF, LineDontBreak, 60 + case lbAny | prNL<<32: + return lbNL, LineDontBreak, 60 // LB7. - {lbAny, prSP}: {lbSP, LineDontBreak, 70}, - {lbAny, prZW}: {lbZW, LineDontBreak, 70}, + case lbAny | prSP<<32: + return lbSP, LineDontBreak, 70 + case lbAny | prZW<<32: + return lbZW, LineDontBreak, 70 // LB8. - {lbZW, prSP}: {lbZW, LineDontBreak, 70}, - {lbZW, prAny}: {lbAny, LineCanBreak, 80}, + case lbZW | prSP<<32: + return lbZW, LineDontBreak, 70 + case lbZW | prAny<<32: + return lbAny, LineCanBreak, 80 // LB11. 
- {lbAny, prWJ}: {lbWJ, LineDontBreak, 110}, - {lbWJ, prAny}: {lbAny, LineDontBreak, 110}, + case lbAny | prWJ<<32: + return lbWJ, LineDontBreak, 110 + case lbWJ | prAny<<32: + return lbAny, LineDontBreak, 110 // LB12. - {lbAny, prGL}: {lbGL, LineCanBreak, 310}, - {lbGL, prAny}: {lbAny, LineDontBreak, 120}, + case lbAny | prGL<<32: + return lbGL, LineCanBreak, 310 + case lbGL | prAny<<32: + return lbAny, LineDontBreak, 120 // LB13 (simple transitions). - {lbAny, prCL}: {lbCL, LineCanBreak, 310}, - {lbAny, prCP}: {lbCP, LineCanBreak, 310}, - {lbAny, prEX}: {lbEX, LineDontBreak, 130}, - {lbAny, prIS}: {lbIS, LineCanBreak, 310}, - {lbAny, prSY}: {lbSY, LineCanBreak, 310}, + case lbAny | prCL<<32: + return lbCL, LineCanBreak, 310 + case lbAny | prCP<<32: + return lbCP, LineCanBreak, 310 + case lbAny | prEX<<32: + return lbEX, LineDontBreak, 130 + case lbAny | prIS<<32: + return lbIS, LineCanBreak, 310 + case lbAny | prSY<<32: + return lbSY, LineCanBreak, 310 // LB14. - {lbAny, prOP}: {lbOP, LineCanBreak, 310}, - {lbOP, prSP}: {lbOP, LineDontBreak, 70}, - {lbOP, prAny}: {lbAny, LineDontBreak, 140}, + case lbAny | prOP<<32: + return lbOP, LineCanBreak, 310 + case lbOP | prSP<<32: + return lbOP, LineDontBreak, 70 + case lbOP | prAny<<32: + return lbAny, LineDontBreak, 140 // LB15. - {lbQU, prSP}: {lbQUSP, LineDontBreak, 70}, - {lbQU, prOP}: {lbOP, LineDontBreak, 150}, - {lbQUSP, prOP}: {lbOP, LineDontBreak, 150}, + case lbQU | prSP<<32: + return lbQUSP, LineDontBreak, 70 + case lbQU | prOP<<32: + return lbOP, LineDontBreak, 150 + case lbQUSP | prOP<<32: + return lbOP, LineDontBreak, 150 // LB16. - {lbCL, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbNUCL, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbCP, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbNUCP, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbCL, prNS}: {lbNS, LineDontBreak, 160}, - {lbNUCL, prNS}: {lbNS, LineDontBreak, 160}, - {lbCP, prNS}: {lbNS, LineDontBreak, 160}, - {lbNUCP, prNS}: {lbNS, LineDontBreak, 160}, - {lbCLCPSP, prNS}: {lbNS, LineDontBreak, 160}, + case lbCL | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbNUCL | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbCP | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbNUCP | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbCL | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbNUCL | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbCP | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbNUCP | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbCLCPSP | prNS<<32: + return lbNS, LineDontBreak, 160 // LB17. - {lbAny, prB2}: {lbB2, LineCanBreak, 310}, - {lbB2, prSP}: {lbB2SP, LineDontBreak, 70}, - {lbB2, prB2}: {lbB2, LineDontBreak, 170}, - {lbB2SP, prB2}: {lbB2, LineDontBreak, 170}, + case lbAny | prB2<<32: + return lbB2, LineCanBreak, 310 + case lbB2 | prSP<<32: + return lbB2SP, LineDontBreak, 70 + case lbB2 | prB2<<32: + return lbB2, LineDontBreak, 170 + case lbB2SP | prB2<<32: + return lbB2, LineDontBreak, 170 // LB18. - {lbSP, prAny}: {lbAny, LineCanBreak, 180}, - {lbQUSP, prAny}: {lbAny, LineCanBreak, 180}, - {lbCLCPSP, prAny}: {lbAny, LineCanBreak, 180}, - {lbB2SP, prAny}: {lbAny, LineCanBreak, 180}, + case lbSP | prAny<<32: + return lbAny, LineCanBreak, 180 + case lbQUSP | prAny<<32: + return lbAny, LineCanBreak, 180 + case lbCLCPSP | prAny<<32: + return lbAny, LineCanBreak, 180 + case lbB2SP | prAny<<32: + return lbAny, LineCanBreak, 180 // LB19. 
- {lbAny, prQU}: {lbQU, LineDontBreak, 190}, - {lbQU, prAny}: {lbAny, LineDontBreak, 190}, + case lbAny | prQU<<32: + return lbQU, LineDontBreak, 190 + case lbQU | prAny<<32: + return lbAny, LineDontBreak, 190 // LB20. - {lbAny, prCB}: {lbCB, LineCanBreak, 200}, - {lbCB, prAny}: {lbAny, LineCanBreak, 200}, + case lbAny | prCB<<32: + return lbCB, LineCanBreak, 200 + case lbCB | prAny<<32: + return lbAny, LineCanBreak, 200 // LB21. - {lbAny, prBA}: {lbBA, LineDontBreak, 210}, - {lbAny, prHY}: {lbHY, LineDontBreak, 210}, - {lbAny, prNS}: {lbNS, LineDontBreak, 210}, - {lbAny, prBB}: {lbBB, LineCanBreak, 310}, - {lbBB, prAny}: {lbAny, LineDontBreak, 210}, + case lbAny | prBA<<32: + return lbBA, LineDontBreak, 210 + case lbAny | prHY<<32: + return lbHY, LineDontBreak, 210 + case lbAny | prNS<<32: + return lbNS, LineDontBreak, 210 + case lbAny | prBB<<32: + return lbBB, LineCanBreak, 310 + case lbBB | prAny<<32: + return lbAny, LineDontBreak, 210 // LB21a. - {lbAny, prHL}: {lbHL, LineCanBreak, 310}, - {lbHL, prHY}: {lbLB21a, LineDontBreak, 210}, - {lbHL, prBA}: {lbLB21a, LineDontBreak, 210}, - {lbLB21a, prAny}: {lbAny, LineDontBreak, 211}, + case lbAny | prHL<<32: + return lbHL, LineCanBreak, 310 + case lbHL | prHY<<32: + return lbLB21a, LineDontBreak, 210 + case lbHL | prBA<<32: + return lbLB21a, LineDontBreak, 210 + case lbLB21a | prAny<<32: + return lbAny, LineDontBreak, 211 // LB21b. - {lbSY, prHL}: {lbHL, LineDontBreak, 212}, - {lbNUSY, prHL}: {lbHL, LineDontBreak, 212}, + case lbSY | prHL<<32: + return lbHL, LineDontBreak, 212 + case lbNUSY | prHL<<32: + return lbHL, LineDontBreak, 212 // LB22. - {lbAny, prIN}: {lbAny, LineDontBreak, 220}, + case lbAny | prIN<<32: + return lbAny, LineDontBreak, 220 // LB23. - {lbAny, prAL}: {lbAL, LineCanBreak, 310}, - {lbAny, prNU}: {lbNU, LineCanBreak, 310}, - {lbAL, prNU}: {lbNU, LineDontBreak, 230}, - {lbHL, prNU}: {lbNU, LineDontBreak, 230}, - {lbNU, prAL}: {lbAL, LineDontBreak, 230}, - {lbNU, prHL}: {lbHL, LineDontBreak, 230}, - {lbNUNU, prAL}: {lbAL, LineDontBreak, 230}, - {lbNUNU, prHL}: {lbHL, LineDontBreak, 230}, + case lbAny | prAL<<32: + return lbAL, LineCanBreak, 310 + case lbAny | prNU<<32: + return lbNU, LineCanBreak, 310 + case lbAL | prNU<<32: + return lbNU, LineDontBreak, 230 + case lbHL | prNU<<32: + return lbNU, LineDontBreak, 230 + case lbNU | prAL<<32: + return lbAL, LineDontBreak, 230 + case lbNU | prHL<<32: + return lbHL, LineDontBreak, 230 + case lbNUNU | prAL<<32: + return lbAL, LineDontBreak, 230 + case lbNUNU | prHL<<32: + return lbHL, LineDontBreak, 230 // LB23a. - {lbAny, prPR}: {lbPR, LineCanBreak, 310}, - {lbAny, prID}: {lbIDEM, LineCanBreak, 310}, - {lbAny, prEB}: {lbEB, LineCanBreak, 310}, - {lbAny, prEM}: {lbIDEM, LineCanBreak, 310}, - {lbPR, prID}: {lbIDEM, LineDontBreak, 231}, - {lbPR, prEB}: {lbEB, LineDontBreak, 231}, - {lbPR, prEM}: {lbIDEM, LineDontBreak, 231}, - {lbIDEM, prPO}: {lbPO, LineDontBreak, 231}, - {lbEB, prPO}: {lbPO, LineDontBreak, 231}, + case lbAny | prPR<<32: + return lbPR, LineCanBreak, 310 + case lbAny | prID<<32: + return lbIDEM, LineCanBreak, 310 + case lbAny | prEB<<32: + return lbEB, LineCanBreak, 310 + case lbAny | prEM<<32: + return lbIDEM, LineCanBreak, 310 + case lbPR | prID<<32: + return lbIDEM, LineDontBreak, 231 + case lbPR | prEB<<32: + return lbEB, LineDontBreak, 231 + case lbPR | prEM<<32: + return lbIDEM, LineDontBreak, 231 + case lbIDEM | prPO<<32: + return lbPO, LineDontBreak, 231 + case lbEB | prPO<<32: + return lbPO, LineDontBreak, 231 // LB24. 
- {lbAny, prPO}: {lbPO, LineCanBreak, 310}, - {lbPR, prAL}: {lbAL, LineDontBreak, 240}, - {lbPR, prHL}: {lbHL, LineDontBreak, 240}, - {lbPO, prAL}: {lbAL, LineDontBreak, 240}, - {lbPO, prHL}: {lbHL, LineDontBreak, 240}, - {lbAL, prPR}: {lbPR, LineDontBreak, 240}, - {lbAL, prPO}: {lbPO, LineDontBreak, 240}, - {lbHL, prPR}: {lbPR, LineDontBreak, 240}, - {lbHL, prPO}: {lbPO, LineDontBreak, 240}, + case lbAny | prPO<<32: + return lbPO, LineCanBreak, 310 + case lbPR | prAL<<32: + return lbAL, LineDontBreak, 240 + case lbPR | prHL<<32: + return lbHL, LineDontBreak, 240 + case lbPO | prAL<<32: + return lbAL, LineDontBreak, 240 + case lbPO | prHL<<32: + return lbHL, LineDontBreak, 240 + case lbAL | prPR<<32: + return lbPR, LineDontBreak, 240 + case lbAL | prPO<<32: + return lbPO, LineDontBreak, 240 + case lbHL | prPR<<32: + return lbPR, LineDontBreak, 240 + case lbHL | prPO<<32: + return lbPO, LineDontBreak, 240 // LB25 (simple transitions). - {lbPR, prNU}: {lbNU, LineDontBreak, 250}, - {lbPO, prNU}: {lbNU, LineDontBreak, 250}, - {lbOP, prNU}: {lbNU, LineDontBreak, 250}, - {lbHY, prNU}: {lbNU, LineDontBreak, 250}, - {lbNU, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNU, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNU, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNUNU, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNUNU, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNUNU, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNUSY, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNUSY, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNUSY, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNUIS, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNUIS, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNUIS, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNU, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNU, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNUNU, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNUNU, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNUSY, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNUSY, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNUIS, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNUIS, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNU, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUNU, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUSY, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUIS, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUCL, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUCP, prPO}: {lbPO, LineDontBreak, 250}, - {lbNU, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUNU, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUSY, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUIS, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUCL, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUCP, prPR}: {lbPR, LineDontBreak, 250}, + case lbPR | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbPO | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbOP | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbHY | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbNU | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNU | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNU | prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNUNU | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNUNU | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNUNU | prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNUSY | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNUSY | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNUSY | prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNUIS | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNUIS | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNUIS | 
prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNU | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNU | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNUNU | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNUNU | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNUSY | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNUSY | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNUIS | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNUIS | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNU | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUNU | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUSY | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUIS | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUCL | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUCP | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNU | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUNU | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUSY | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUIS | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUCL | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUCP | prPR<<32: + return lbPR, LineDontBreak, 250 // LB26. - {lbAny, prJL}: {lbJL, LineCanBreak, 310}, - {lbAny, prJV}: {lbJV, LineCanBreak, 310}, - {lbAny, prJT}: {lbJT, LineCanBreak, 310}, - {lbAny, prH2}: {lbH2, LineCanBreak, 310}, - {lbAny, prH3}: {lbH3, LineCanBreak, 310}, - {lbJL, prJL}: {lbJL, LineDontBreak, 260}, - {lbJL, prJV}: {lbJV, LineDontBreak, 260}, - {lbJL, prH2}: {lbH2, LineDontBreak, 260}, - {lbJL, prH3}: {lbH3, LineDontBreak, 260}, - {lbJV, prJV}: {lbJV, LineDontBreak, 260}, - {lbJV, prJT}: {lbJT, LineDontBreak, 260}, - {lbH2, prJV}: {lbJV, LineDontBreak, 260}, - {lbH2, prJT}: {lbJT, LineDontBreak, 260}, - {lbJT, prJT}: {lbJT, LineDontBreak, 260}, - {lbH3, prJT}: {lbJT, LineDontBreak, 260}, + case lbAny | prJL<<32: + return lbJL, LineCanBreak, 310 + case lbAny | prJV<<32: + return lbJV, LineCanBreak, 310 + case lbAny | prJT<<32: + return lbJT, LineCanBreak, 310 + case lbAny | prH2<<32: + return lbH2, LineCanBreak, 310 + case lbAny | prH3<<32: + return lbH3, LineCanBreak, 310 + case lbJL | prJL<<32: + return lbJL, LineDontBreak, 260 + case lbJL | prJV<<32: + return lbJV, LineDontBreak, 260 + case lbJL | prH2<<32: + return lbH2, LineDontBreak, 260 + case lbJL | prH3<<32: + return lbH3, LineDontBreak, 260 + case lbJV | prJV<<32: + return lbJV, LineDontBreak, 260 + case lbJV | prJT<<32: + return lbJT, LineDontBreak, 260 + case lbH2 | prJV<<32: + return lbJV, LineDontBreak, 260 + case lbH2 | prJT<<32: + return lbJT, LineDontBreak, 260 + case lbJT | prJT<<32: + return lbJT, LineDontBreak, 260 + case lbH3 | prJT<<32: + return lbJT, LineDontBreak, 260 // LB27. 
- {lbJL, prPO}: {lbPO, LineDontBreak, 270}, - {lbJV, prPO}: {lbPO, LineDontBreak, 270}, - {lbJT, prPO}: {lbPO, LineDontBreak, 270}, - {lbH2, prPO}: {lbPO, LineDontBreak, 270}, - {lbH3, prPO}: {lbPO, LineDontBreak, 270}, - {lbPR, prJL}: {lbJL, LineDontBreak, 270}, - {lbPR, prJV}: {lbJV, LineDontBreak, 270}, - {lbPR, prJT}: {lbJT, LineDontBreak, 270}, - {lbPR, prH2}: {lbH2, LineDontBreak, 270}, - {lbPR, prH3}: {lbH3, LineDontBreak, 270}, + case lbJL | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbJV | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbJT | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbH2 | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbH3 | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbPR | prJL<<32: + return lbJL, LineDontBreak, 270 + case lbPR | prJV<<32: + return lbJV, LineDontBreak, 270 + case lbPR | prJT<<32: + return lbJT, LineDontBreak, 270 + case lbPR | prH2<<32: + return lbH2, LineDontBreak, 270 + case lbPR | prH3<<32: + return lbH3, LineDontBreak, 270 // LB28. - {lbAL, prAL}: {lbAL, LineDontBreak, 280}, - {lbAL, prHL}: {lbHL, LineDontBreak, 280}, - {lbHL, prAL}: {lbAL, LineDontBreak, 280}, - {lbHL, prHL}: {lbHL, LineDontBreak, 280}, + case lbAL | prAL<<32: + return lbAL, LineDontBreak, 280 + case lbAL | prHL<<32: + return lbHL, LineDontBreak, 280 + case lbHL | prAL<<32: + return lbAL, LineDontBreak, 280 + case lbHL | prHL<<32: + return lbHL, LineDontBreak, 280 // LB29. - {lbIS, prAL}: {lbAL, LineDontBreak, 290}, - {lbIS, prHL}: {lbHL, LineDontBreak, 290}, - {lbNUIS, prAL}: {lbAL, LineDontBreak, 290}, - {lbNUIS, prHL}: {lbHL, LineDontBreak, 290}, + case lbIS | prAL<<32: + return lbAL, LineDontBreak, 290 + case lbIS | prHL<<32: + return lbHL, LineDontBreak, 290 + case lbNUIS | prAL<<32: + return lbAL, LineDontBreak, 290 + case lbNUIS | prHL<<32: + return lbHL, LineDontBreak, 290 + + default: + return -1, -1, -1 + } } // transitionLineBreakState determines the new state of the line break parser @@ -290,7 +449,7 @@ var lbTransitions = map[[2]int][3]int{ // further lookups. func transitionLineBreakState(state int, r rune, b []byte, str string) (newState int, lineBreak int) { // Determine the property of the next character. - nextProperty, generalCategory := propertyWithGenCat(lineBreakCodePoints, r) + nextProperty, generalCategory := propertyLineBreak(r) // Prepare. var forceNoBreak, isCPeaFWH bool @@ -306,7 +465,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState defer func() { // Transition into LB30. if newState == lbCP || newState == lbNUCP { - ea := property(eastAsianWidth, r) + ea := propertyEastAsianWidth(r) if ea != prF && ea != prW && ea != prH { newState |= lbCPeaFWHBit } @@ -352,30 +511,27 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState // Find the applicable transition in the table. var rule int - transition, ok := lbTransitions[[2]int{state, nextProperty}] - if ok { - // We have a specific transition. We'll use it. - newState, lineBreak, rule = transition[0], transition[1], transition[2] - } else { + newState, lineBreak, rule = lbTransitions(state, nextProperty) + if newState < 0 { // No specific transition found. Try the less specific ones. 
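
Reviewer note: the recurring pattern in this refactor is worth calling out. Each parser's transition table moves from a `map[[2]int][3]int` to a plain function that switches on a single `uint64`, with the state in the low 32 bits and the property in the high 32 bits. Because every `case` is a constant expression, the compiler can lay the switch out without map hashing or allocation, and a `-1` sentinel stands in for the map's missing-key `ok` boolean. A minimal, self-contained sketch of the idea, using made-up state and property names rather than the library's own:

package main

import "fmt"

const (
	lbFoo = iota // hypothetical line-break states, low 32 bits of the key
	lbBar
)

const (
	prFoo = iota // hypothetical character properties, high 32 bits of the key
	prBar
)

// transition mirrors the shape of lbTransitions: constant-only cases over a
// packed uint64 key, with -1 playing the role of the map lookup's "ok" bool.
func transition(state, prop int) (newState, lineBreak, rule int) {
	switch uint64(state) | uint64(prop)<<32 {
	case lbFoo | prBar<<32:
		return lbBar, 1, 310
	case lbBar | prFoo<<32:
		return lbFoo, 0, 40
	default:
		return -1, -1, -1 // sentinel: no specific transition exists
	}
}

func main() {
	fmt.Println(transition(lbFoo, prBar)) // 1 1 310 (specific transition hit)
	fmt.Println(transition(lbBar, prBar)) // -1 -1 -1 (caller must fall back)
}

The same encoding is applied verbatim to sbTransitions and wbTransitions later in this diff.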
- transAnyProp, okAnyProp := lbTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := lbTransitions[[2]int{lbAny, nextProperty}] - if okAnyProp && okAnyState { + anyPropProp, anyPropLineBreak, anyPropRule := lbTransitions(state, prAny) + anyStateProp, anyStateLineBreak, anyStateRule := lbTransitions(lbAny, nextProperty) + if anyPropProp >= 0 && anyStateProp >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState, lineBreak, rule = transAnyState[0], transAnyState[1], transAnyState[2] - if transAnyProp[2] < transAnyState[2] { - lineBreak, rule = transAnyProp[1], transAnyProp[2] + newState, lineBreak, rule = anyStateProp, anyStateLineBreak, anyStateRule + if anyPropRule < anyStateRule { + lineBreak, rule = anyPropLineBreak, anyPropRule } - } else if okAnyProp { + } else if anyPropProp >= 0 { // We only have a specific state. - newState, lineBreak, rule = transAnyProp[0], transAnyProp[1], transAnyProp[2] + newState, lineBreak, rule = anyPropProp, anyPropLineBreak, anyPropRule // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. - } else if okAnyState { + } else if anyStateProp >= 0 { // We only have a specific property. - newState, lineBreak, rule = transAnyState[0], transAnyState[1], transAnyState[2] + newState, lineBreak, rule = anyStateProp, anyStateLineBreak, anyStateRule } else { // No known transition. LB31: ALL ÷ ALL. newState, lineBreak, rule = lbAny, LineCanBreak, 310 @@ -414,7 +570,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState r, _ = utf8.DecodeRuneInString(str) } if r != utf8.RuneError { - pr, _ := propertyWithGenCat(lineBreakCodePoints, r) + pr, _ := propertyLineBreak(r) if pr == prNU { return lbNU, LineDontBreak } @@ -424,7 +580,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState // LB30 (part one). if rule > 300 { if (state == lbAL || state == lbHL || state == lbNU || state == lbNUNU) && nextProperty == prOP { - ea := property(eastAsianWidth, r) + ea := propertyEastAsianWidth(r) if ea != prF && ea != prW && ea != prH { return lbOP, LineDontBreak } @@ -460,7 +616,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState return prAny, LineDontBreak } } - graphemeProperty := property(graphemeCodePoints, r) + graphemeProperty := propertyGraphemes(r) if graphemeProperty == prExtendedPictographic && generalCategory == gcCn { return lbExtPicCn, LineCanBreak } diff --git a/vendor/github.com/rivo/uniseg/properties.go b/vendor/github.com/rivo/uniseg/properties.go index bc3c7bcf3..6290e6810 100644 --- a/vendor/github.com/rivo/uniseg/properties.go +++ b/vendor/github.com/rivo/uniseg/properties.go @@ -160,9 +160,49 @@ func property(dictionary [][3]int, r rune) int { return propertySearch(dictionary, r)[2] } -// propertyWithGenCat returns the Unicode property value and General Category -// (see constants above) of the given code point. -func propertyWithGenCat(dictionary [][4]int, r rune) (property, generalCategory int) { - entry := propertySearch(dictionary, r) +// propertyLineBreak returns the Unicode property value and General Category +// (see constants above) of the given code point, as listed in the line break +// code points table, while fast tracking ASCII digits and letters. 
+func propertyLineBreak(r rune) (property, generalCategory int) { + if r >= 'a' && r <= 'z' { + return prAL, gcLl + } + if r >= 'A' && r <= 'Z' { + return prAL, gcLu + } + if r >= '0' && r <= '9' { + return prNU, gcNd + } + entry := propertySearch(lineBreakCodePoints, r) return entry[2], entry[3] } + +// propertyGraphemes returns the Unicode grapheme cluster property value of the +// given code point while fast tracking ASCII characters. +func propertyGraphemes(r rune) int { + if r >= 0x20 && r <= 0x7e { + return prAny + } + if r == 0x0a { + return prLF + } + if r == 0x0d { + return prCR + } + if r >= 0 && r <= 0x1f || r == 0x7f { + return prControl + } + return property(graphemeCodePoints, r) +} + +// propertyEastAsianWidth returns the Unicode East Asian Width property value of +// the given code point while fast tracking ASCII characters. +func propertyEastAsianWidth(r rune) int { + if r >= 0x20 && r <= 0x7e { + return prNa + } + if r >= 0 && r <= 0x1f || r == 0x7f { + return prN + } + return property(eastAsianWidth, r) +} diff --git a/vendor/github.com/rivo/uniseg/sentenceproperties.go b/vendor/github.com/rivo/uniseg/sentenceproperties.go index ba0cf2de1..67717ec1f 100644 --- a/vendor/github.com/rivo/uniseg/sentenceproperties.go +++ b/vendor/github.com/rivo/uniseg/sentenceproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // sentenceBreakCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/auxiliary/SentenceBreakProperty.txt +// https://www.unicode.org/Public/15.0.0/ucd/auxiliary/SentenceBreakProperty.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. 
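
Reviewer note: propertyLineBreak, propertyGraphemes, and propertyEastAsianWidth all share one optimization: runes in the ASCII range are resolved with a handful of comparisons before the generated tables are consulted, so the binary search only runs for the less common non-ASCII case. A runnable sketch of that shape, with a stand-in table and property values rather than the package's internals:

package main

import (
	"fmt"
	"sort"
)

const (
	propLetter = iota
	propDigit
	propOther
)

// table maps code point ranges to properties, sorted by range start, in the
// style of the generated lineBreakCodePoints / eastAsianWidth tables.
var table = [][3]int{
	{0x00C0, 0x00FF, propLetter}, // Latin-1 letters (simplified)
	{0x0660, 0x0669, propDigit},  // Arabic-Indic digits
}

func lookup(r rune) int {
	// Fast path: answer ASCII letters and digits without touching the table.
	if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' {
		return propLetter
	}
	if r >= '0' && r <= '9' {
		return propDigit
	}
	// Slow path: binary search over the sorted ranges.
	i := sort.Search(len(table), func(i int) bool { return table[i][1] >= int(r) })
	if i < len(table) && int(r) >= table[i][0] {
		return table[i][2]
	}
	return propOther
}

func main() {
	fmt.Println(lookup('x'), lookup('7'), lookup(0x00E9), lookup(' ')) // 0 1 0 2
}

For predominantly ASCII input this removes nearly all table searches from the hot path; the fast-path answers are chosen to match what the tables would return for those ranges, so behavior should be unchanged.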
var sentenceBreakCodePoints = [][3]int{ {0x0009, 0x0009, prSp}, // Cc @@ -843,6 +843,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prNumeric}, // Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prOLetter}, // Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prExtend}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prExtend}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prOLetter}, // Lo [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -896,7 +897,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x0EBD, 0x0EBD, prOLetter}, // Lo LAO SEMIVOWEL SIGN NYO {0x0EC0, 0x0EC4, prOLetter}, // Lo [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI {0x0EC6, 0x0EC6, prOLetter}, // Lm LAO KO LA - {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prExtend}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prNumeric}, // Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0EDC, 0x0EDF, prOLetter}, // Lo [4] LAO HO NO..LAO LETTER KHMU NYO {0x0F00, 0x0F00, prOLetter}, // Lo TIBETAN SYLLABLE OM @@ -958,7 +959,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x10C7, 0x10C7, prUpper}, // L& GEORGIAN CAPITAL LETTER YN {0x10CD, 0x10CD, prUpper}, // L& GEORGIAN CAPITAL LETTER AEN {0x10D0, 0x10FA, prOLetter}, // L& [43] GEORGIAN LETTER AN..GEORGIAN LETTER AIN - {0x10FC, 0x10FC, prOLetter}, // Lm MODIFIER LETTER GEORGIAN NAR + {0x10FC, 0x10FC, prLower}, // Lm MODIFIER LETTER GEORGIAN NAR {0x10FD, 0x10FF, prOLetter}, // L& [3] GEORGIAN LETTER AEN..GEORGIAN LETTER LABIAL SIGN {0x1100, 0x1248, prOLetter}, // Lo [329] HANGUL CHOSEONG KIYEOK..ETHIOPIC SYLLABLE QWA {0x124A, 0x124D, prOLetter}, // Lo [4] ETHIOPIC SYLLABLE QWI..ETHIOPIC SYLLABLE QWE @@ -2034,7 +2035,7 @@ var sentenceBreakCodePoints = [][3]int{ {0xA7D7, 0xA7D7, prLower}, // L& LATIN SMALL LETTER MIDDLE SCOTS S {0xA7D8, 0xA7D8, prUpper}, // L& LATIN CAPITAL LETTER SIGMOID S {0xA7D9, 0xA7D9, prLower}, // L& LATIN SMALL LETTER SIGMOID S - {0xA7F2, 0xA7F4, prOLetter}, // Lm [3] MODIFIER LETTER CAPITAL C..MODIFIER LETTER CAPITAL Q + {0xA7F2, 0xA7F4, prLower}, // Lm [3] MODIFIER LETTER CAPITAL C..MODIFIER LETTER CAPITAL Q {0xA7F5, 0xA7F5, prUpper}, // L& LATIN CAPITAL LETTER REVERSED HALF H {0xA7F6, 0xA7F6, prLower}, // L& LATIN SMALL LETTER REVERSED HALF H {0xA7F7, 0xA7F7, prOLetter}, // Lo LATIN EPIGRAPHIC LETTER SIDEWAYS I @@ -2140,7 +2141,7 @@ var sentenceBreakCodePoints = [][3]int{ {0xAB30, 0xAB5A, prLower}, // L& [43] LATIN SMALL LETTER BARRED ALPHA..LATIN SMALL LETTER Y WITH SHORT RIGHT LEG {0xAB5C, 0xAB5F, prLower}, // Lm [4] MODIFIER LETTER SMALL HENG..MODIFIER LETTER SMALL U WITH LEFT HOOK {0xAB60, 0xAB68, prLower}, // L& [9] LATIN SMALL LETTER SAKHA YAT..LATIN SMALL LETTER TURNED R WITH MIDDLE TILDE - {0xAB69, 0xAB69, prOLetter}, // Lm MODIFIER LETTER SMALL TURNED W + {0xAB69, 0xAB69, prLower}, // Lm MODIFIER LETTER SMALL TURNED W {0xAB70, 0xABBF, prLower}, // L& [80] CHEROKEE SMALL LETTER A..CHEROKEE SMALL LETTER YA {0xABC0, 0xABE2, prOLetter}, // Lo [35] MEETEI MAYEK LETTER KOK..MEETEI MAYEK LETTER I LONSUM {0xABE3, 0xABE4, prExtend}, // Mc [2] MEETEI MAYEK VOWEL SIGN ONAP..MEETEI MAYEK VOWEL SIGN INAP @@ -2334,6 +2335,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x10E80, 0x10EA9, prOLetter}, // Lo [42] 
YEZIDI LETTER ELIF..YEZIDI LETTER ET {0x10EAB, 0x10EAC, prExtend}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EB0, 0x10EB1, prOLetter}, // Lo [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prExtend}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prOLetter}, // Lo [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F27, 0x10F27, prOLetter}, // Lo OLD SOGDIAN LIGATURE AYIN-DALETH {0x10F30, 0x10F45, prOLetter}, // Lo [22] SOGDIAN LETTER ALEPH..SOGDIAN INDEPENDENT SHIN @@ -2408,6 +2410,8 @@ var sentenceBreakCodePoints = [][3]int{ {0x11238, 0x11239, prSTerm}, // Po [2] KHOJKI DANDA..KHOJKI DOUBLE DANDA {0x1123B, 0x1123C, prSTerm}, // Po [2] KHOJKI SECTION MARK..KHOJKI DOUBLE SECTION MARK {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prOLetter}, // Lo [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prExtend}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prOLetter}, // Lo [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prOLetter}, // Lo MULTANI LETTER GHA {0x1128A, 0x1128D, prOLetter}, // Lo [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -2603,13 +2607,29 @@ var sentenceBreakCodePoints = [][3]int{ {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prExtend}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O {0x11EF7, 0x11EF8, prSTerm}, // Po [2] MAKASAR PASSIMBANG..MAKASAR END OF SECTION + {0x11F00, 0x11F01, prExtend}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prOLetter}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prExtend}, // Mc KAWI SIGN VISARGA + {0x11F04, 0x11F10, prOLetter}, // Lo [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prOLetter}, // Lo [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prExtend}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prExtend}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prExtend}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prExtend}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prExtend}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prExtend}, // Mn KAWI CONJOINER + {0x11F43, 0x11F44, prSTerm}, // Po [2] KAWI DANDA..KAWI DOUBLE DANDA + {0x11F50, 0x11F59, prNumeric}, // Nd [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prOLetter}, // Lo LISU LETTER YHA {0x12000, 0x12399, prOLetter}, // Lo [922] CUNEIFORM SIGN A..CUNEIFORM SIGN U U {0x12400, 0x1246E, prOLetter}, // Nl [111] CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NUMERIC SIGN NINE U VARIANT FORM {0x12480, 0x12543, prOLetter}, // Lo [196] CUNEIFORM SIGN AB TIMES NUN TENU..CUNEIFORM SIGN ZU5 TIMES THREE DISH TENU {0x12F90, 0x12FF0, prOLetter}, // Lo [97] CYPRO-MINOAN SIGN CM001..CYPRO-MINOAN SIGN CM114 - {0x13000, 0x1342E, prOLetter}, // Lo [1071] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH AA032 - {0x13430, 0x13438, prFormat}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x13000, 0x1342F, prOLetter}, // Lo [1072] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH V011D + {0x13430, 0x1343F, prFormat}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prExtend}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prOLetter}, // Lo [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE 
LOST SIGN + {0x13447, 0x13455, prExtend}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x14646, prOLetter}, // Lo [583] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A530 {0x16800, 0x16A38, prOLetter}, // Lo [569] BAMUM LETTER PHASE-A NGKUE MFON..BAMUM LETTER PHASE-F VUEQ {0x16A40, 0x16A5E, prOLetter}, // Lo [31] MRO LETTER TA..MRO LETTER TEK @@ -2648,7 +2668,9 @@ var sentenceBreakCodePoints = [][3]int{ {0x1AFF5, 0x1AFFB, prOLetter}, // Lm [7] KATAKANA LETTER MINNAN TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-5 {0x1AFFD, 0x1AFFE, prOLetter}, // Lm [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B122, prOLetter}, // Lo [291] KATAKANA LETTER ARCHAIC E..KATAKANA LETTER ARCHAIC WU + {0x1B132, 0x1B132, prOLetter}, // Lo HIRAGANA LETTER SMALL KO {0x1B150, 0x1B152, prOLetter}, // Lo [3] HIRAGANA LETTER SMALL WI..HIRAGANA LETTER SMALL WO + {0x1B155, 0x1B155, prOLetter}, // Lo KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prOLetter}, // Lo [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1B170, 0x1B2FB, prOLetter}, // Lo [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB {0x1BC00, 0x1BC6A, prOLetter}, // Lo [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M @@ -2738,11 +2760,14 @@ var sentenceBreakCodePoints = [][3]int{ {0x1DF00, 0x1DF09, prLower}, // L& [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prOLetter}, // Lo LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prLower}, // L& [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prLower}, // L& [6] LATIN SMALL LETTER D WITH MID-HEIGHT LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prLower}, // Lm [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prExtend}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prOLetter}, // Lo [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prOLetter}, // Lm [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -2753,6 +2778,10 @@ var sentenceBreakCodePoints = [][3]int{ {0x1E2C0, 0x1E2EB, prOLetter}, // Lo [44] WANCHO LETTER AA..WANCHO LETTER YIH {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prNumeric}, // Nd [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE + {0x1E4D0, 0x1E4EA, prOLetter}, // Lo [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prOLetter}, // Lm NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prExtend}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prNumeric}, // Nd [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE 
{0x1E7E0, 0x1E7E6, prOLetter}, // Lo [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prOLetter}, // Lo [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prOLetter}, // Lo [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -2803,12 +2832,13 @@ var sentenceBreakCodePoints = [][3]int{ {0x1F676, 0x1F678, prClose}, // So [3] SANS-SERIF HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT..SANS-SERIF HEAVY LOW DOUBLE COMMA QUOTATION MARK ORNAMENT {0x1FBF0, 0x1FBF9, prNumeric}, // Nd [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x20000, 0x2A6DF, prOLetter}, // Lo [42720] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6DF - {0x2A700, 0x2B738, prOLetter}, // Lo [4153] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B738 + {0x2A700, 0x2B739, prOLetter}, // Lo [4154] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B739 {0x2B740, 0x2B81D, prOLetter}, // Lo [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D {0x2B820, 0x2CEA1, prOLetter}, // Lo [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 {0x2CEB0, 0x2EBE0, prOLetter}, // Lo [7473] CJK UNIFIED IDEOGRAPH-2CEB0..CJK UNIFIED IDEOGRAPH-2EBE0 {0x2F800, 0x2FA1D, prOLetter}, // Lo [542] CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPATIBILITY IDEOGRAPH-2FA1D {0x30000, 0x3134A, prOLetter}, // Lo [4939] CJK UNIFIED IDEOGRAPH-30000..CJK UNIFIED IDEOGRAPH-3134A + {0x31350, 0x323AF, prOLetter}, // Lo [4192] CJK UNIFIED IDEOGRAPH-31350..CJK UNIFIED IDEOGRAPH-323AF {0xE0001, 0xE0001, prFormat}, // Cf LANGUAGE TAG {0xE0020, 0xE007F, prExtend}, // Cf [96] TAG SPACE..CANCEL TAG {0xE0100, 0xE01EF, prExtend}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 diff --git a/vendor/github.com/rivo/uniseg/sentencerules.go b/vendor/github.com/rivo/uniseg/sentencerules.go index 58c04794e..0b29c7bdb 100644 --- a/vendor/github.com/rivo/uniseg/sentencerules.go +++ b/vendor/github.com/rivo/uniseg/sentencerules.go @@ -18,104 +18,178 @@ const ( sbSB8aSp ) -// The sentence break parser's breaking instructions. -const ( - sbDontBreak = iota - sbBreak -) - -// The sentence break parser's state transitions. It's anologous to -// grTransitions, see comments there for details. Unicode version 14.0.0. -var sbTransitions = map[[2]int][3]int{ +// sbTransitions implements the sentence break parser's state transitions. It's +// anologous to [grTransitions], see comments there for details. +// +// Unicode version 15.0.0. +func sbTransitions(state, prop int) (newState int, sentenceBreak bool, rule int) { + switch uint64(state) | uint64(prop)<<32 { // SB3. - {sbAny, prCR}: {sbCR, sbDontBreak, 9990}, - {sbCR, prLF}: {sbParaSep, sbDontBreak, 30}, + case sbAny | prCR<<32: + return sbCR, false, 9990 + case sbCR | prLF<<32: + return sbParaSep, false, 30 // SB4. - {sbAny, prSep}: {sbParaSep, sbDontBreak, 9990}, - {sbAny, prLF}: {sbParaSep, sbDontBreak, 9990}, - {sbParaSep, prAny}: {sbAny, sbBreak, 40}, - {sbCR, prAny}: {sbAny, sbBreak, 40}, + case sbAny | prSep<<32: + return sbParaSep, false, 9990 + case sbAny | prLF<<32: + return sbParaSep, false, 9990 + case sbParaSep | prAny<<32: + return sbAny, true, 40 + case sbCR | prAny<<32: + return sbAny, true, 40 // SB6. - {sbAny, prATerm}: {sbATerm, sbDontBreak, 9990}, - {sbATerm, prNumeric}: {sbAny, sbDontBreak, 60}, - {sbSB7, prNumeric}: {sbAny, sbDontBreak, 60}, // Because ATerm also appears in SB7. 
+ case sbAny | prATerm<<32: + return sbATerm, false, 9990 + case sbATerm | prNumeric<<32: + return sbAny, false, 60 + case sbSB7 | prNumeric<<32: + return sbAny, false, 60 // Because ATerm also appears in SB7. // SB7. - {sbAny, prUpper}: {sbUpper, sbDontBreak, 9990}, - {sbAny, prLower}: {sbLower, sbDontBreak, 9990}, - {sbUpper, prATerm}: {sbSB7, sbDontBreak, 70}, - {sbLower, prATerm}: {sbSB7, sbDontBreak, 70}, - {sbSB7, prUpper}: {sbUpper, sbDontBreak, 70}, + case sbAny | prUpper<<32: + return sbUpper, false, 9990 + case sbAny | prLower<<32: + return sbLower, false, 9990 + case sbUpper | prATerm<<32: + return sbSB7, false, 70 + case sbLower | prATerm<<32: + return sbSB7, false, 70 + case sbSB7 | prUpper<<32: + return sbUpper, false, 70 // SB8a. - {sbAny, prSTerm}: {sbSTerm, sbDontBreak, 9990}, - {sbATerm, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbATerm, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbATerm, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB7, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB7, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB7, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8Close, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8Close, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8Close, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8Sp, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8Sp, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8Sp, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSTerm, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSTerm, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSTerm, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8aClose, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8aClose, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8aClose, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8aSp, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8aSp, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8aSp, prSTerm}: {sbSTerm, sbDontBreak, 81}, + case sbAny | prSTerm<<32: + return sbSTerm, false, 9990 + case sbATerm | prSContinue<<32: + return sbAny, false, 81 + case sbATerm | prATerm<<32: + return sbATerm, false, 81 + case sbATerm | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB7 | prSContinue<<32: + return sbAny, false, 81 + case sbSB7 | prATerm<<32: + return sbATerm, false, 81 + case sbSB7 | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8Close | prSContinue<<32: + return sbAny, false, 81 + case sbSB8Close | prATerm<<32: + return sbATerm, false, 81 + case sbSB8Close | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8Sp | prSContinue<<32: + return sbAny, false, 81 + case sbSB8Sp | prATerm<<32: + return sbATerm, false, 81 + case sbSB8Sp | prSTerm<<32: + return sbSTerm, false, 81 + case sbSTerm | prSContinue<<32: + return sbAny, false, 81 + case sbSTerm | prATerm<<32: + return sbATerm, false, 81 + case sbSTerm | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8aClose | prSContinue<<32: + return sbAny, false, 81 + case sbSB8aClose | prATerm<<32: + return sbATerm, false, 81 + case sbSB8aClose | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8aSp | prSContinue<<32: + return sbAny, false, 81 + case sbSB8aSp | prATerm<<32: + return sbATerm, false, 81 + case sbSB8aSp | prSTerm<<32: + return sbSTerm, false, 81 // SB9. 
- {sbATerm, prClose}: {sbSB8Close, sbDontBreak, 90}, - {sbSB7, prClose}: {sbSB8Close, sbDontBreak, 90}, - {sbSB8Close, prClose}: {sbSB8Close, sbDontBreak, 90}, - {sbATerm, prSp}: {sbSB8Sp, sbDontBreak, 90}, - {sbSB7, prSp}: {sbSB8Sp, sbDontBreak, 90}, - {sbSB8Close, prSp}: {sbSB8Sp, sbDontBreak, 90}, - {sbSTerm, prClose}: {sbSB8aClose, sbDontBreak, 90}, - {sbSB8aClose, prClose}: {sbSB8aClose, sbDontBreak, 90}, - {sbSTerm, prSp}: {sbSB8aSp, sbDontBreak, 90}, - {sbSB8aClose, prSp}: {sbSB8aSp, sbDontBreak, 90}, - {sbATerm, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbATerm, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbATerm, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSB7, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSB7, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSB7, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSB8Close, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSB8Close, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSB8Close, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSTerm, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSTerm, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSTerm, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSB8aClose, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSB8aClose, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSB8aClose, prLF}: {sbParaSep, sbDontBreak, 90}, + case sbATerm | prClose<<32: + return sbSB8Close, false, 90 + case sbSB7 | prClose<<32: + return sbSB8Close, false, 90 + case sbSB8Close | prClose<<32: + return sbSB8Close, false, 90 + case sbATerm | prSp<<32: + return sbSB8Sp, false, 90 + case sbSB7 | prSp<<32: + return sbSB8Sp, false, 90 + case sbSB8Close | prSp<<32: + return sbSB8Sp, false, 90 + case sbSTerm | prClose<<32: + return sbSB8aClose, false, 90 + case sbSB8aClose | prClose<<32: + return sbSB8aClose, false, 90 + case sbSTerm | prSp<<32: + return sbSB8aSp, false, 90 + case sbSB8aClose | prSp<<32: + return sbSB8aSp, false, 90 + case sbATerm | prSep<<32: + return sbParaSep, false, 90 + case sbATerm | prCR<<32: + return sbParaSep, false, 90 + case sbATerm | prLF<<32: + return sbParaSep, false, 90 + case sbSB7 | prSep<<32: + return sbParaSep, false, 90 + case sbSB7 | prCR<<32: + return sbParaSep, false, 90 + case sbSB7 | prLF<<32: + return sbParaSep, false, 90 + case sbSB8Close | prSep<<32: + return sbParaSep, false, 90 + case sbSB8Close | prCR<<32: + return sbParaSep, false, 90 + case sbSB8Close | prLF<<32: + return sbParaSep, false, 90 + case sbSTerm | prSep<<32: + return sbParaSep, false, 90 + case sbSTerm | prCR<<32: + return sbParaSep, false, 90 + case sbSTerm | prLF<<32: + return sbParaSep, false, 90 + case sbSB8aClose | prSep<<32: + return sbParaSep, false, 90 + case sbSB8aClose | prCR<<32: + return sbParaSep, false, 90 + case sbSB8aClose | prLF<<32: + return sbParaSep, false, 90 // SB10. - {sbSB8Sp, prSp}: {sbSB8Sp, sbDontBreak, 100}, - {sbSB8aSp, prSp}: {sbSB8aSp, sbDontBreak, 100}, - {sbSB8Sp, prSep}: {sbParaSep, sbDontBreak, 100}, - {sbSB8Sp, prCR}: {sbParaSep, sbDontBreak, 100}, - {sbSB8Sp, prLF}: {sbParaSep, sbDontBreak, 100}, + case sbSB8Sp | prSp<<32: + return sbSB8Sp, false, 100 + case sbSB8aSp | prSp<<32: + return sbSB8aSp, false, 100 + case sbSB8Sp | prSep<<32: + return sbParaSep, false, 100 + case sbSB8Sp | prCR<<32: + return sbParaSep, false, 100 + case sbSB8Sp | prLF<<32: + return sbParaSep, false, 100 // SB11. 
- {sbATerm, prAny}: {sbAny, sbBreak, 110}, - {sbSB7, prAny}: {sbAny, sbBreak, 110}, - {sbSB8Close, prAny}: {sbAny, sbBreak, 110}, - {sbSB8Sp, prAny}: {sbAny, sbBreak, 110}, - {sbSTerm, prAny}: {sbAny, sbBreak, 110}, - {sbSB8aClose, prAny}: {sbAny, sbBreak, 110}, - {sbSB8aSp, prAny}: {sbAny, sbBreak, 110}, + case sbATerm | prAny<<32: + return sbAny, true, 110 + case sbSB7 | prAny<<32: + return sbAny, true, 110 + case sbSB8Close | prAny<<32: + return sbAny, true, 110 + case sbSB8Sp | prAny<<32: + return sbAny, true, 110 + case sbSTerm | prAny<<32: + return sbAny, true, 110 + case sbSB8aClose | prAny<<32: + return sbAny, true, 110 + case sbSB8aSp | prAny<<32: + return sbAny, true, 110 // We'll always break after ParaSep due to SB4. + + default: + return -1, false, -1 + } } // transitionSentenceBreakState determines the new state of the sentence break @@ -141,30 +215,27 @@ func transitionSentenceBreakState(state int, r rune, b []byte, str string) (newS // Find the applicable transition in the table. var rule int - transition, ok := sbTransitions[[2]int{state, nextProperty}] - if ok { - // We have a specific transition. We'll use it. - newState, sentenceBreak, rule = transition[0], transition[1] == sbBreak, transition[2] - } else { + newState, sentenceBreak, rule = sbTransitions(state, nextProperty) + if newState < 0 { // No specific transition found. Try the less specific ones. - transAnyProp, okAnyProp := sbTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := sbTransitions[[2]int{sbAny, nextProperty}] - if okAnyProp && okAnyState { + anyPropState, anyPropProp, anyPropRule := sbTransitions(state, prAny) + anyStateState, anyStateProp, anyStateRule := sbTransitions(sbAny, nextProperty) + if anyPropState >= 0 && anyStateState >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState, sentenceBreak, rule = transAnyState[0], transAnyState[1] == sbBreak, transAnyState[2] - if transAnyProp[2] < transAnyState[2] { - sentenceBreak, rule = transAnyProp[1] == sbBreak, transAnyProp[2] + newState, sentenceBreak, rule = anyStateState, anyStateProp, anyStateRule + if anyPropRule < anyStateRule { + sentenceBreak, rule = anyPropProp, anyPropRule } - } else if okAnyProp { + } else if anyPropState >= 0 { // We only have a specific state. - newState, sentenceBreak, rule = transAnyProp[0], transAnyProp[1] == sbBreak, transAnyProp[2] + newState, sentenceBreak, rule = anyPropState, anyPropProp, anyPropRule // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. - } else if okAnyState { + } else if anyStateState >= 0 { // We only have a specific property. - newState, sentenceBreak, rule = transAnyState[0], transAnyState[1] == sbBreak, transAnyState[2] + newState, sentenceBreak, rule = anyStateState, anyStateProp, anyStateRule } else { // No known transition. SB999: Any × Any. newState, sentenceBreak, rule = sbAny, false, 9990 diff --git a/vendor/github.com/rivo/uniseg/step.go b/vendor/github.com/rivo/uniseg/step.go index 6eca4b5dc..9b72c5e59 100644 --- a/vendor/github.com/rivo/uniseg/step.go +++ b/vendor/github.com/rivo/uniseg/step.go @@ -100,7 +100,7 @@ func Step(b []byte, state int) (cluster, rest []byte, boundaries int, newState i if len(b) <= length { // If we're already past the end, there is nothing else to parse. 
var prop int if state < 0 { - prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) } else { prop = state >> shiftPropState } @@ -150,16 +150,14 @@ func Step(b []byte, state int) (cluster, rest []byte, boundaries int, newState i return b[:length], b[length:], boundary, graphemeState | (wordState << shiftWordState) | (sentenceState << shiftSentenceState) | (lineState << shiftLineState) | (prop << shiftPropState) } - if r == vs16 { - width = 2 - } else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL { - width += runeWidth(r, prop) - } else if firstProp == prExtendedPictographic { + if firstProp == prExtendedPictographic { if r == vs15 { width = 1 - } else { + } else if r == vs16 { width = 2 } + } else if firstProp != prRegionalIndicator && firstProp != prL { + width += runeWidth(r, prop) } length += l @@ -179,7 +177,7 @@ func StepString(str string, state int) (cluster, rest string, boundaries int, ne // Extract the first rune. r, length := utf8.DecodeRuneInString(str) if len(str) <= length { // If we're already past the end, there is nothing else to parse. - prop := property(graphemeCodePoints, r) + prop := propertyGraphemes(r) return str, "", LineMustBreak | (1 << shiftWord) | (1 << shiftSentence) | (runeWidth(r, prop) << ShiftWidth), grAny | (wbAny << shiftWordState) | (sbAny << shiftSentenceState) | (lbAny << shiftLineState) } @@ -226,16 +224,14 @@ func StepString(str string, state int) (cluster, rest string, boundaries int, ne return str[:length], str[length:], boundary, graphemeState | (wordState << shiftWordState) | (sentenceState << shiftSentenceState) | (lineState << shiftLineState) | (prop << shiftPropState) } - if r == vs16 { - width = 2 - } else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL { - width += runeWidth(r, prop) - } else if firstProp == prExtendedPictographic { + if firstProp == prExtendedPictographic { if r == vs15 { width = 1 - } else { + } else if r == vs16 { width = 2 } + } else if firstProp != prRegionalIndicator && firstProp != prL { + width += runeWidth(r, prop) } length += l diff --git a/vendor/github.com/rivo/uniseg/width.go b/vendor/github.com/rivo/uniseg/width.go index 12a57cc2e..975a9f134 100644 --- a/vendor/github.com/rivo/uniseg/width.go +++ b/vendor/github.com/rivo/uniseg/width.go @@ -1,5 +1,10 @@ package uniseg +// EastAsianAmbiguousWidth specifies the monospace width for East Asian +// characters classified as Ambiguous. The default is 1 but some rare fonts +// render them with a width of 2. +var EastAsianAmbiguousWidth = 1 + // runeWidth returns the monospace width for the given rune. The provided // grapheme property is a value mapped by the [graphemeCodePoints] table. // @@ -33,9 +38,11 @@ func runeWidth(r rune, graphemeProperty int) int { return 4 } - switch property(eastAsianWidth, r) { + switch propertyEastAsianWidth(r) { case prW, prF: return 2 + case prA: + return EastAsianAmbiguousWidth } return 1 diff --git a/vendor/github.com/rivo/uniseg/wordproperties.go b/vendor/github.com/rivo/uniseg/wordproperties.go index 805cc536c..277ca1006 100644 --- a/vendor/github.com/rivo/uniseg/wordproperties.go +++ b/vendor/github.com/rivo/uniseg/wordproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. 
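
Reviewer note: the width.go hunk above introduces a public knob, `EastAsianAmbiguousWidth`, controlling how runes whose East Asian Width property is Ambiguous (`prA`) are counted; the default of 1 preserves the old behavior. A hedged usage sketch, assuming the rest of the v0.4.x API (`StringWidth` in particular) behaves as documented:

package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	// GREEK SMALL LETTER ALPHA and PLUS-MINUS SIGN, both classified as East
	// Asian Ambiguous in the Unicode data.
	s := "α±"

	fmt.Println(uniseg.StringWidth(s)) // expected 2: Ambiguous runes default to width 1

	// Opt in to double-width rendering, e.g. for CJK terminal fonts. This is
	// a plain package-level variable, so set it once at startup; it is not
	// synchronized for concurrent mutation.
	uniseg.EastAsianAmbiguousWidth = 2
	fmt.Println(uniseg.StringWidth(s)) // expected 4 once each Ambiguous rune counts as 2
}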
+package uniseg + // workBreakCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/auxiliary/WordBreakProperty.txt +// https://www.unicode.org/Public/15.0.0/ucd/auxiliary/WordBreakProperty.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var workBreakCodePoints = [][3]int{ {0x000A, 0x000A, prLF}, // Cc @@ -318,6 +318,7 @@ var workBreakCodePoints = [][3]int{ {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prNumeric}, // Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prALetter}, // Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prExtend}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prExtend}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prALetter}, // Lo [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -357,7 +358,7 @@ var workBreakCodePoints = [][3]int{ {0x0E50, 0x0E59, prNumeric}, // Nd [10] THAI DIGIT ZERO..THAI DIGIT NINE {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO - {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prExtend}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prNumeric}, // Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0F00, 0x0F00, prALetter}, // Lo TIBETAN SYLLABLE OM {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS @@ -1093,6 +1094,7 @@ var workBreakCodePoints = [][3]int{ {0x10E80, 0x10EA9, prALetter}, // Lo [42] YEZIDI LETTER ELIF..YEZIDI LETTER ET {0x10EAB, 0x10EAC, prExtend}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EB0, 0x10EB1, prALetter}, // Lo [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prExtend}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prALetter}, // Lo [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F27, 0x10F27, prALetter}, // Lo OLD SOGDIAN LIGATURE AYIN-DALETH {0x10F30, 0x10F45, prALetter}, // Lo [22] SOGDIAN LETTER ALEPH..SOGDIAN INDEPENDENT SHIN @@ -1157,6 +1159,8 @@ var workBreakCodePoints = [][3]int{ {0x11235, 0x11235, prExtend}, // Mc KHOJKI SIGN VIRAMA {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prALetter}, // Lo [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prExtend}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prALetter}, // Lo [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prALetter}, // Lo MULTANI LETTER GHA {0x1128A, 0x1128D, prALetter}, // Lo [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -1337,13 +1341,28 @@ var workBreakCodePoints = [][3]int{ {0x11EE0, 0x11EF2, prALetter}, // Lo [19] MAKASAR LETTER KA..MAKASAR ANGKA {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prExtend}, // 
Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O + {0x11F00, 0x11F01, prExtend}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prALetter}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prExtend}, // Mc KAWI SIGN VISARGA + {0x11F04, 0x11F10, prALetter}, // Lo [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prALetter}, // Lo [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prExtend}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prExtend}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prExtend}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prExtend}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prExtend}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prExtend}, // Mn KAWI CONJOINER + {0x11F50, 0x11F59, prNumeric}, // Nd [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prALetter}, // Lo LISU LETTER YHA {0x12000, 0x12399, prALetter}, // Lo [922] CUNEIFORM SIGN A..CUNEIFORM SIGN U U {0x12400, 0x1246E, prALetter}, // Nl [111] CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NUMERIC SIGN NINE U VARIANT FORM {0x12480, 0x12543, prALetter}, // Lo [196] CUNEIFORM SIGN AB TIMES NUN TENU..CUNEIFORM SIGN ZU5 TIMES THREE DISH TENU {0x12F90, 0x12FF0, prALetter}, // Lo [97] CYPRO-MINOAN SIGN CM001..CYPRO-MINOAN SIGN CM114 - {0x13000, 0x1342E, prALetter}, // Lo [1071] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH AA032 - {0x13430, 0x13438, prFormat}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x13000, 0x1342F, prALetter}, // Lo [1072] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH V011D + {0x13430, 0x1343F, prFormat}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prExtend}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prALetter}, // Lo [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE LOST SIGN + {0x13447, 0x13455, prExtend}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x14646, prALetter}, // Lo [583] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A530 {0x16800, 0x16A38, prALetter}, // Lo [569] BAMUM LETTER PHASE-A NGKUE MFON..BAMUM LETTER PHASE-F VUEQ {0x16A40, 0x16A5E, prALetter}, // Lo [31] MRO LETTER TA..MRO LETTER TEK @@ -1374,6 +1393,7 @@ var workBreakCodePoints = [][3]int{ {0x1AFFD, 0x1AFFE, prKatakana}, // Lm [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B000, prKatakana}, // Lo KATAKANA LETTER ARCHAIC E {0x1B120, 0x1B122, prKatakana}, // Lo [3] KATAKANA LETTER ARCHAIC YI..KATAKANA LETTER ARCHAIC WU + {0x1B155, 0x1B155, prKatakana}, // Lo KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prKatakana}, // Lo [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1BC00, 0x1BC6A, prALetter}, // Lo [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M {0x1BC70, 0x1BC7C, prALetter}, // Lo [13] DUPLOYAN AFFIX LEFT HORIZONTAL SECANT..DUPLOYAN AFFIX ATTACHED TANGENT HOOK @@ -1431,11 +1451,14 @@ var workBreakCodePoints = [][3]int{ {0x1DF00, 0x1DF09, prALetter}, // L& [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prALetter}, // Lo LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prALetter}, // L& [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prALetter}, // L& [6] LATIN SMALL LETTER D WITH MID-HEIGHT 
LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prALetter}, // Lm [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prExtend}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prALetter}, // Lo [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prALetter}, // Lm [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -1446,6 +1469,10 @@ var workBreakCodePoints = [][3]int{ {0x1E2C0, 0x1E2EB, prALetter}, // Lo [44] WANCHO LETTER AA..WANCHO LETTER YIH {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prNumeric}, // Nd [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE + {0x1E4D0, 0x1E4EA, prALetter}, // Lo [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prALetter}, // Lm NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prExtend}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prNumeric}, // Nd [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE {0x1E7E0, 0x1E7E6, prALetter}, // Lo [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prALetter}, // Lo [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prALetter}, // Lo [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -1740,7 +1767,8 @@ var workBreakCodePoints = [][3]int{ {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // E0.0 [2] (🛓..🛔) STUPA..PAGODA {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // E12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6D7, prExtendedPictographic}, // E13.0 [2] (🛖..🛗) hut..elevator - {0x1F6D8, 0x1F6DC, prExtendedPictographic}, // E0.0 [5] (🛘..🛜) .. + {0x1F6D8, 0x1F6DB, prExtendedPictographic}, // E0.0 [4] (🛘..🛛) .. + {0x1F6DC, 0x1F6DC, prExtendedPictographic}, // E15.0 [1] (🛜) wireless {0x1F6DD, 0x1F6DF, prExtendedPictographic}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy {0x1F6E0, 0x1F6E5, prExtendedPictographic}, // E0.7 [6] (🛠️..🛥️) hammer and wrench..motor boat {0x1F6E6, 0x1F6E8, prExtendedPictographic}, // E0.0 [3] (🛦..🛨) UP-POINTING MILITARY AIRPLANE..UP-POINTING SMALL AIRPLANE @@ -1757,7 +1785,7 @@ var workBreakCodePoints = [][3]int{ {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // E12.0 [1] (🛺) auto rickshaw {0x1F6FB, 0x1F6FC, prExtendedPictographic}, // E13.0 [2] (🛻..🛼) pickup truck..roller skate {0x1F6FD, 0x1F6FF, prExtendedPictographic}, // E0.0 [3] (🛽..🛿) .. - {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) .. + {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) LOT OF FORTUNE..ORCUS {0x1F7D5, 0x1F7DF, prExtendedPictographic}, // E0.0 [11] (🟕..🟟) CIRCLED TRIANGLE.. 
{0x1F7E0, 0x1F7EB, prExtendedPictographic}, // E12.0 [12] (🟠..🟫) orange circle..brown square {0x1F7EC, 0x1F7EF, prExtendedPictographic}, // E0.0 [4] (🟬..🟯) .. @@ -1816,30 +1844,37 @@ var workBreakCodePoints = [][3]int{ {0x1FA00, 0x1FA6F, prExtendedPictographic}, // E0.0 [112] (🨀..🩯) NEUTRAL CHESS KING.. {0x1FA70, 0x1FA73, prExtendedPictographic}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA74, prExtendedPictographic}, // E13.0 [1] (🩴) thong sandal - {0x1FA75, 0x1FA77, prExtendedPictographic}, // E0.0 [3] (🩵..🩷) .. + {0x1FA75, 0x1FA77, prExtendedPictographic}, // E15.0 [3] (🩵..🩷) light blue heart..pink heart {0x1FA78, 0x1FA7A, prExtendedPictographic}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7C, prExtendedPictographic}, // E14.0 [2] (🩻..🩼) x-ray..crutch {0x1FA7D, 0x1FA7F, prExtendedPictographic}, // E0.0 [3] (🩽..🩿) .. {0x1FA80, 0x1FA82, prExtendedPictographic}, // E12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA86, prExtendedPictographic}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls - {0x1FA87, 0x1FA8F, prExtendedPictographic}, // E0.0 [9] (🪇..🪏) .. + {0x1FA87, 0x1FA88, prExtendedPictographic}, // E15.0 [2] (🪇..🪈) maracas..flute + {0x1FA89, 0x1FA8F, prExtendedPictographic}, // E0.0 [7] (🪉..🪏) .. {0x1FA90, 0x1FA95, prExtendedPictographic}, // E12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FAA8, prExtendedPictographic}, // E13.0 [19] (🪖..🪨) military helmet..rock {0x1FAA9, 0x1FAAC, prExtendedPictographic}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa - {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E0.0 [3] (🪭..🪯) .. + {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E15.0 [3] (🪭..🪯) folding hand fan..khanda {0x1FAB0, 0x1FAB6, prExtendedPictographic}, // E13.0 [7] (🪰..🪶) fly..feather {0x1FAB7, 0x1FABA, prExtendedPictographic}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs - {0x1FABB, 0x1FABF, prExtendedPictographic}, // E0.0 [5] (🪻..🪿) .. + {0x1FABB, 0x1FABD, prExtendedPictographic}, // E15.0 [3] (🪻..🪽) hyacinth..wing + {0x1FABE, 0x1FABE, prExtendedPictographic}, // E0.0 [1] (🪾) + {0x1FABF, 0x1FABF, prExtendedPictographic}, // E15.0 [1] (🪿) goose {0x1FAC0, 0x1FAC2, prExtendedPictographic}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging {0x1FAC3, 0x1FAC5, prExtendedPictographic}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown - {0x1FAC6, 0x1FACF, prExtendedPictographic}, // E0.0 [10] (🫆..🫏) .. + {0x1FAC6, 0x1FACD, prExtendedPictographic}, // E0.0 [8] (🫆..🫍) .. + {0x1FACE, 0x1FACF, prExtendedPictographic}, // E15.0 [2] (🫎..🫏) moose..donkey {0x1FAD0, 0x1FAD6, prExtendedPictographic}, // E13.0 [7] (🫐..🫖) blueberries..teapot {0x1FAD7, 0x1FAD9, prExtendedPictographic}, // E14.0 [3] (🫗..🫙) pouring liquid..jar - {0x1FADA, 0x1FADF, prExtendedPictographic}, // E0.0 [6] (🫚..🫟) .. + {0x1FADA, 0x1FADB, prExtendedPictographic}, // E15.0 [2] (🫚..🫛) ginger root..pea pod + {0x1FADC, 0x1FADF, prExtendedPictographic}, // E0.0 [4] (🫜..🫟) .. {0x1FAE0, 0x1FAE7, prExtendedPictographic}, // E14.0 [8] (🫠..🫧) melting face..bubbles - {0x1FAE8, 0x1FAEF, prExtendedPictographic}, // E0.0 [8] (🫨..🫯) .. + {0x1FAE8, 0x1FAE8, prExtendedPictographic}, // E15.0 [1] (🫨) shaking face + {0x1FAE9, 0x1FAEF, prExtendedPictographic}, // E0.0 [7] (🫩..🫯) .. {0x1FAF0, 0x1FAF6, prExtendedPictographic}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands - {0x1FAF7, 0x1FAFF, prExtendedPictographic}, // E0.0 [9] (🫷..🫿) .. 
+ {0x1FAF7, 0x1FAF8, prExtendedPictographic}, // E15.0 [2] (🫷..🫸) leftwards pushing hand..rightwards pushing hand + {0x1FAF9, 0x1FAFF, prExtendedPictographic}, // E0.0 [7] (🫹..🫿) .. {0x1FBF0, 0x1FBF9, prNumeric}, // Nd [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x1FC00, 0x1FFFD, prExtendedPictographic}, // E0.0[1022] (🰀..🿽) .. {0xE0001, 0xE0001, prFormat}, // Cf LANGUAGE TAG diff --git a/vendor/github.com/rivo/uniseg/wordrules.go b/vendor/github.com/rivo/uniseg/wordrules.go index 325407e40..57a8c6831 100644 --- a/vendor/github.com/rivo/uniseg/wordrules.go +++ b/vendor/github.com/rivo/uniseg/wordrules.go @@ -22,82 +22,121 @@ const ( wbZWJBit = 16 // This bit is set for any states followed by at least one zero-width joiner (see WB4 and WB3c). ) -// The word break parser's breaking instructions. -const ( - wbDontBreak = iota - wbBreak -) - -// The word break parser's state transitions. It's anologous to grTransitions, -// see comments there for details. Unicode version 14.0.0. -var wbTransitions = map[[2]int][3]int{ +// wbTransitions implements the word break parser's state transitions. It's +// anologous to [grTransitions], see comments there for details. +// +// Unicode version 15.0.0. +func wbTransitions(state, prop int) (newState int, wordBreak bool, rule int) { + switch uint64(state) | uint64(prop)<<32 { // WB3b. - {wbAny, prNewline}: {wbNewline, wbBreak, 32}, - {wbAny, prCR}: {wbCR, wbBreak, 32}, - {wbAny, prLF}: {wbLF, wbBreak, 32}, + case wbAny | prNewline<<32: + return wbNewline, true, 32 + case wbAny | prCR<<32: + return wbCR, true, 32 + case wbAny | prLF<<32: + return wbLF, true, 32 // WB3a. - {wbNewline, prAny}: {wbAny, wbBreak, 31}, - {wbCR, prAny}: {wbAny, wbBreak, 31}, - {wbLF, prAny}: {wbAny, wbBreak, 31}, + case wbNewline | prAny<<32: + return wbAny, true, 31 + case wbCR | prAny<<32: + return wbAny, true, 31 + case wbLF | prAny<<32: + return wbAny, true, 31 // WB3. - {wbCR, prLF}: {wbLF, wbDontBreak, 30}, + case wbCR | prLF<<32: + return wbLF, false, 30 // WB3d. - {wbAny, prWSegSpace}: {wbWSegSpace, wbBreak, 9990}, - {wbWSegSpace, prWSegSpace}: {wbWSegSpace, wbDontBreak, 34}, + case wbAny | prWSegSpace<<32: + return wbWSegSpace, true, 9990 + case wbWSegSpace | prWSegSpace<<32: + return wbWSegSpace, false, 34 // WB5. - {wbAny, prALetter}: {wbALetter, wbBreak, 9990}, - {wbAny, prHebrewLetter}: {wbHebrewLetter, wbBreak, 9990}, - {wbALetter, prALetter}: {wbALetter, wbDontBreak, 50}, - {wbALetter, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 50}, - {wbHebrewLetter, prALetter}: {wbALetter, wbDontBreak, 50}, - {wbHebrewLetter, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 50}, + case wbAny | prALetter<<32: + return wbALetter, true, 9990 + case wbAny | prHebrewLetter<<32: + return wbHebrewLetter, true, 9990 + case wbALetter | prALetter<<32: + return wbALetter, false, 50 + case wbALetter | prHebrewLetter<<32: + return wbHebrewLetter, false, 50 + case wbHebrewLetter | prALetter<<32: + return wbALetter, false, 50 + case wbHebrewLetter | prHebrewLetter<<32: + return wbHebrewLetter, false, 50 // WB7. Transitions to wbWB7 handled by transitionWordBreakState(). - {wbWB7, prALetter}: {wbALetter, wbDontBreak, 70}, - {wbWB7, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 70}, + case wbWB7 | prALetter<<32: + return wbALetter, false, 70 + case wbWB7 | prHebrewLetter<<32: + return wbHebrewLetter, false, 70 // WB7a. - {wbHebrewLetter, prSingleQuote}: {wbAny, wbDontBreak, 71}, + case wbHebrewLetter | prSingleQuote<<32: + return wbAny, false, 71 // WB7c. 
Transitions to wbWB7c handled by transitionWordBreakState(). - {wbWB7c, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 73}, + case wbWB7c | prHebrewLetter<<32: + return wbHebrewLetter, false, 73 // WB8. - {wbAny, prNumeric}: {wbNumeric, wbBreak, 9990}, - {wbNumeric, prNumeric}: {wbNumeric, wbDontBreak, 80}, + case wbAny | prNumeric<<32: + return wbNumeric, true, 9990 + case wbNumeric | prNumeric<<32: + return wbNumeric, false, 80 // WB9. - {wbALetter, prNumeric}: {wbNumeric, wbDontBreak, 90}, - {wbHebrewLetter, prNumeric}: {wbNumeric, wbDontBreak, 90}, + case wbALetter | prNumeric<<32: + return wbNumeric, false, 90 + case wbHebrewLetter | prNumeric<<32: + return wbNumeric, false, 90 // WB10. - {wbNumeric, prALetter}: {wbALetter, wbDontBreak, 100}, - {wbNumeric, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 100}, + case wbNumeric | prALetter<<32: + return wbALetter, false, 100 + case wbNumeric | prHebrewLetter<<32: + return wbHebrewLetter, false, 100 // WB11. Transitions to wbWB11 handled by transitionWordBreakState(). - {wbWB11, prNumeric}: {wbNumeric, wbDontBreak, 110}, + case wbWB11 | prNumeric<<32: + return wbNumeric, false, 110 // WB13. - {wbAny, prKatakana}: {wbKatakana, wbBreak, 9990}, - {wbKatakana, prKatakana}: {wbKatakana, wbDontBreak, 130}, + case wbAny | prKatakana<<32: + return wbKatakana, true, 9990 + case wbKatakana | prKatakana<<32: + return wbKatakana, false, 130 // WB13a. - {wbAny, prExtendNumLet}: {wbExtendNumLet, wbBreak, 9990}, - {wbALetter, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbHebrewLetter, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbNumeric, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbKatakana, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbExtendNumLet, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, + case wbAny | prExtendNumLet<<32: + return wbExtendNumLet, true, 9990 + case wbALetter | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbHebrewLetter | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbNumeric | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbKatakana | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbExtendNumLet | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 // WB13b. - {wbExtendNumLet, prALetter}: {wbALetter, wbDontBreak, 132}, - {wbExtendNumLet, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 132}, - {wbExtendNumLet, prNumeric}: {wbNumeric, wbDontBreak, 132}, - {wbExtendNumLet, prKatakana}: {prKatakana, wbDontBreak, 132}, + case wbExtendNumLet | prALetter<<32: + return wbALetter, false, 132 + case wbExtendNumLet | prHebrewLetter<<32: + return wbHebrewLetter, false, 132 + case wbExtendNumLet | prNumeric<<32: + return wbNumeric, false, 132 + case wbExtendNumLet | prKatakana<<32: + return wbKatakana, false, 132 + + default: + return -1, false, -1 + } } // transitionWordBreakState determines the new state of the word break parser @@ -141,30 +180,27 @@ func transitionWordBreakState(state int, r rune, b []byte, str string) (newState // Find the applicable transition in the table. var rule int - transition, ok := wbTransitions[[2]int{state, nextProperty}] - if ok { - // We have a specific transition. We'll use it. - newState, wordBreak, rule = transition[0], transition[1] == wbBreak, transition[2] - } else { + newState, wordBreak, rule = wbTransitions(state, nextProperty) + if newState < 0 { // No specific transition found. Try the less specific ones. 
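+			// Two fallbacks are consulted below: (state, prAny) and (wbAny,
+			// nextProperty). When both match, the next state comes from the
+			// property-specific match, while the break decision follows
+			// whichever match carries the lower rule number, since
+			// lower-numbered word break rules take precedence.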
- transAnyProp, okAnyProp := wbTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := wbTransitions[[2]int{wbAny, nextProperty}] - if okAnyProp && okAnyState { + anyPropState, anyPropWordBreak, anyPropRule := wbTransitions(state, prAny) + anyStateState, anyStateWordBreak, anyStateRule := wbTransitions(wbAny, nextProperty) + if anyPropState >= 0 && anyStateState >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState, wordBreak, rule = transAnyState[0], transAnyState[1] == wbBreak, transAnyState[2] - if transAnyProp[2] < transAnyState[2] { - wordBreak, rule = transAnyProp[1] == wbBreak, transAnyProp[2] + newState, wordBreak, rule = anyStateState, anyStateWordBreak, anyStateRule + if anyPropRule < anyStateRule { + wordBreak, rule = anyPropWordBreak, anyPropRule } - } else if okAnyProp { + } else if anyPropState >= 0 { // We only have a specific state. - newState, wordBreak, rule = transAnyProp[0], transAnyProp[1] == wbBreak, transAnyProp[2] + newState, wordBreak, rule = anyPropState, anyPropWordBreak, anyPropRule // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. - } else if okAnyState { + } else if anyStateState >= 0 { // We only have a specific property. - newState, wordBreak, rule = transAnyState[0], transAnyState[1] == wbBreak, transAnyState[2] + newState, wordBreak, rule = anyStateState, anyStateWordBreak, anyStateRule } else { // No known transition. WB999: Any ÷ Any. newState, wordBreak, rule = wbAny, true, 9990 diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go new file mode 100644 index 000000000..d1660796a --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go @@ -0,0 +1,211 @@ +// +// Copyright 2024 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package signature + +import ( + "crypto" + "crypto/ed25519" + "crypto/rand" + "errors" + "fmt" + "io" + + "github.com/sigstore/sigstore/pkg/signature/options" +) + +var ed25519phSupportedHashFuncs = []crypto.Hash{ + crypto.SHA512, +} + +// ED25519phSigner is a signature.Signer that uses the Ed25519 public-key signature system with pre-hashing +type ED25519phSigner struct { + priv ed25519.PrivateKey +} + +// LoadED25519phSigner calculates signatures using the specified private key. +func LoadED25519phSigner(priv ed25519.PrivateKey) (*ED25519phSigner, error) { + if priv == nil { + return nil, errors.New("invalid ED25519 private key specified") + } + + return &ED25519phSigner{ + priv: priv, + }, nil +} + +// ToED25519SignerVerifier creates a ED25519SignerVerifier from a ED25519phSignerVerifier +// +// Clients that use ED25519phSignerVerifier should use this method to get a +// SignerVerifier that uses the same ED25519 private key, but with the Pure +// Ed25519 algorithm. 
This might be necessary to interact with Fulcio, which +// only supports the Pure Ed25519 algorithm. +func (e ED25519phSignerVerifier) ToED25519SignerVerifier() (*ED25519SignerVerifier, error) { + return LoadED25519SignerVerifier(e.priv) +} + +// SignMessage signs the provided message. If the message is provided, +// this method will compute the digest according to the hash function specified +// when the ED25519phSigner was created. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e ED25519phSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) { + digest, _, err := ComputeDigestForSigning(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...) + if err != nil { + return nil, err + } + + return e.priv.Sign(nil, digest, crypto.SHA512) +} + +// Public returns the public key that can be used to verify signatures created by +// this signer. +func (e ED25519phSigner) Public() crypto.PublicKey { + if e.priv == nil { + return nil + } + + return e.priv.Public() +} + +// PublicKey returns the public key that can be used to verify signatures created by +// this signer. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ED25519phSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.Public(), nil +} + +// Sign computes the signature for the specified message; the first and third arguments to this +// function are ignored as they are not used by the ED25519ph algorithm. +func (e ED25519phSigner) Sign(_ io.Reader, digest []byte, _ crypto.SignerOpts) ([]byte, error) { + return e.SignMessage(nil, options.WithDigest(digest)) +} + +// ED25519phVerifier is a signature.Verifier that uses the Ed25519 public-key signature system +type ED25519phVerifier struct { + publicKey ed25519.PublicKey +} + +// LoadED25519phVerifier returns a Verifier that verifies signatures using the +// specified ED25519 public key. +func LoadED25519phVerifier(pub ed25519.PublicKey) (*ED25519phVerifier, error) { + if pub == nil { + return nil, errors.New("invalid ED25519 public key specified") + } + + return &ED25519phVerifier{ + publicKey: pub, + }, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e *ED25519phVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} + +// VerifySignature verifies the signature for the given message. Unless provided +// in an option, the digest of the message will be computed using the hash function specified +// when the ED25519phVerifier was created. +// +// This function returns nil if the verification succeeded, and an error message otherwise. +// +// This function recognizes the following Options listed in order of preference: +// +// - WithDigest() +// +// All other options are ignored if specified. +func (e *ED25519phVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { + if signature == nil { + return errors.New("nil signature passed to VerifySignature") + } + + digest, _, err := ComputeDigestForVerifying(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...) 
+ if err != nil { + return err + } + + sigBytes, err := io.ReadAll(signature) + if err != nil { + return fmt.Errorf("reading signature: %w", err) + } + + if err := ed25519.VerifyWithOptions(e.publicKey, digest, sigBytes, &ed25519.Options{Hash: crypto.SHA512}); err != nil { + return fmt.Errorf("failed to verify signature: %w", err) + } + return nil +} + +// ED25519phSignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system +type ED25519phSignerVerifier struct { + *ED25519phSigner + *ED25519phVerifier +} + +// LoadED25519phSignerVerifier creates a combined signer and verifier. This is +// a convenience object that simply wraps an instance of ED25519phSigner and ED25519phVerifier. +func LoadED25519phSignerVerifier(priv ed25519.PrivateKey) (*ED25519phSignerVerifier, error) { + signer, err := LoadED25519phSigner(priv) + if err != nil { + return nil, fmt.Errorf("initializing signer: %w", err) + } + pub, ok := priv.Public().(ed25519.PublicKey) + if !ok { + return nil, fmt.Errorf("given key is not ed25519.PublicKey") + } + verifier, err := LoadED25519phVerifier(pub) + if err != nil { + return nil, fmt.Errorf("initializing verifier: %w", err) + } + + return &ED25519phSignerVerifier{ + ED25519phSigner: signer, + ED25519phVerifier: verifier, + }, nil +} + +// NewDefaultED25519phSignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using crypto/rand as an entropy source. +func NewDefaultED25519phSignerVerifier() (*ED25519phSignerVerifier, ed25519.PrivateKey, error) { + return NewED25519phSignerVerifier(rand.Reader) +} + +// NewED25519phSignerVerifier creates a combined signer and verifier using ED25519. +// This creates a new ED25519 key using the specified entropy source. +func NewED25519phSignerVerifier(rand io.Reader) (*ED25519phSignerVerifier, ed25519.PrivateKey, error) { + _, priv, err := ed25519.GenerateKey(rand) + if err != nil { + return nil, nil, err + } + + sv, err := LoadED25519phSignerVerifier(priv) + if err != nil { + return nil, nil, err + } + + return sv, priv, nil +} + +// PublicKey returns the public key that is used to verify signatures by +// this verifier. As this value is held in memory, all options provided in arguments +// to this method are ignored. +func (e ED25519phSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) { + return e.publicKey, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go index 0be699f7e..e17e768c2 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/options.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options.go @@ -18,6 +18,7 @@ package signature import ( "context" "crypto" + "crypto/rsa" "io" "github.com/sigstore/sigstore/pkg/signature/options" @@ -55,3 +56,10 @@ type VerifyOption interface { RPCOption MessageOption } + +// LoadOption specifies options to be used when creating a Signer/Verifier +type LoadOption interface { + ApplyHash(*crypto.Hash) + ApplyED25519ph(*bool) + ApplyRSAPSS(**rsa.PSSOptions) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go new file mode 100644 index 000000000..e5f3f0116 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go @@ -0,0 +1,76 @@ +// +// Copyright 2024 The Sigstore Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package options
+
+import (
+	"crypto"
+	"crypto/rsa"
+)
+
+// RequestHash implements the functional option pattern for setting a Hash
+// function when loading a signer or verifier
+type RequestHash struct {
+	NoOpOptionImpl
+	hashFunc crypto.Hash
+}
+
+// ApplyHash sets the hash as requested by the functional option
+func (r RequestHash) ApplyHash(hash *crypto.Hash) {
+	*hash = r.hashFunc
+}
+
+// WithHash specifies that the given hash function should be used when loading a signer or verifier
+func WithHash(hash crypto.Hash) RequestHash {
+	return RequestHash{hashFunc: hash}
+}
+
+// RequestED25519ph implements the functional option pattern for specifying that
+// ED25519ph (pre-hashed) should be used when loading a signer or verifier and an
+// ED25519 key is detected
+type RequestED25519ph struct {
+	NoOpOptionImpl
+	useED25519ph bool
+}
+
+// ApplyED25519ph sets the ED25519ph flag as requested by the functional option
+func (r RequestED25519ph) ApplyED25519ph(useED25519ph *bool) {
+	*useED25519ph = r.useED25519ph
+}
+
+// WithED25519ph specifies that the ED25519ph algorithm should be used when an ED25519 key is used
+func WithED25519ph() RequestED25519ph {
+	return RequestED25519ph{useED25519ph: true}
+}
+
+// RequestPSSOptions implements the functional option pattern for specifying that RSA
+// PSS should be used when loading a signer or verifier and an RSA key is
+// detected
+type RequestPSSOptions struct {
+	NoOpOptionImpl
+	opts *rsa.PSSOptions
+}
+
+// ApplyRSAPSS sets the RSAPSS options as requested by the functional option
+func (r RequestPSSOptions) ApplyRSAPSS(opts **rsa.PSSOptions) {
+	*opts = r.opts
+}
+
+// WithRSAPSS specifies that the RSAPSS algorithm should be used when an RSA key is used.
+// Note that the RSA PSSOptions contains a hash algorithm, which will override
+// the hash function specified with WithHash.
+func WithRSAPSS(opts *rsa.PSSOptions) RequestPSSOptions { + return RequestPSSOptions{opts: opts} +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go index c7f1ccb91..0c0e51856 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go @@ -18,6 +18,7 @@ package options import ( "context" "crypto" + "crypto/rsa" "io" ) @@ -47,3 +48,12 @@ func (NoOpOptionImpl) ApplyKeyVersion(_ *string) {} // ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces func (NoOpOptionImpl) ApplyKeyVersionUsed(_ **string) {} + +// ApplyHash is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyHash(_ *crypto.Hash) {} + +// ApplyED25519ph is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyED25519ph(_ *bool) {} + +// ApplyRSAPSS is a no-op required to fully implement the requisite interfaces +func (NoOpOptionImpl) ApplyRSAPSS(_ **rsa.PSSOptions) {} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go index 3bd3823cb..e26def9fa 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go @@ -30,6 +30,7 @@ import ( _ "crypto/sha512" "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" // these ensure we have the implementations loaded _ "golang.org/x/crypto/sha3" @@ -59,12 +60,33 @@ func (s SignerOpts) HashFunc() crypto.Hash { // If privateKey is an RSA key, a RSAPKCS1v15Signer will be returned. If a // RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() method directly. func LoadSigner(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Signer, error) { + return LoadSignerWithOpts(privateKey, options.WithHash(hashFunc)) +} + +// LoadSignerWithOpts returns a signature.Signer based on the algorithm of the private key +// provided. +func LoadSignerWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (Signer, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + switch pk := privateKey.(type) { case *rsa.PrivateKey: + if rsaPSSOptions != nil { + return LoadRSAPSSSigner(pk, hashFunc, rsaPSSOptions) + } return LoadRSAPKCS1v15Signer(pk, hashFunc) case *ecdsa.PrivateKey: return LoadECDSASigner(pk, hashFunc) case ed25519.PrivateKey: + if useED25519ph { + return LoadED25519phSigner(pk) + } return LoadED25519Signer(pk) } return nil, errors.New("unsupported public key type") @@ -87,3 +109,17 @@ func LoadSignerFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.Pas } return LoadSigner(priv, hashFunc) } + +// LoadSignerFromPEMFileWithOpts returns a signature.Signer based on the algorithm of the private key +// in the file. The Signer will use the hash function specified in the options when computing digests. 
+func LoadSignerFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (Signer, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerWithOpts(priv, opts...) +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go index 90667f2a8..70253b121 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go @@ -25,6 +25,7 @@ import ( "path/filepath" "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature/options" ) // SignerVerifier creates and verifies digital signatures over a message using a specified key pair @@ -39,12 +40,33 @@ type SignerVerifier interface { // If privateKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a // RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() method directly. func LoadSignerVerifier(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (SignerVerifier, error) { + return LoadSignerVerifierWithOpts(privateKey, options.WithHash(hashFunc)) +} + +// LoadSignerVerifierWithOpts returns a signature.SignerVerifier based on the +// algorithm of the private key provided and the user's choice. +func LoadSignerVerifierWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (SignerVerifier, error) { + var rsaPSSOptions *rsa.PSSOptions + var useED25519ph bool + hashFunc := crypto.SHA256 + for _, o := range opts { + o.ApplyED25519ph(&useED25519ph) + o.ApplyHash(&hashFunc) + o.ApplyRSAPSS(&rsaPSSOptions) + } + switch pk := privateKey.(type) { case *rsa.PrivateKey: + if rsaPSSOptions != nil { + return LoadRSAPSSSignerVerifier(pk, hashFunc, rsaPSSOptions) + } return LoadRSAPKCS1v15SignerVerifier(pk, hashFunc) case *ecdsa.PrivateKey: return LoadECDSASignerVerifier(pk, hashFunc) case ed25519.PrivateKey: + if useED25519ph { + return LoadED25519phSignerVerifier(pk) + } return LoadED25519SignerVerifier(pk) } return nil, errors.New("unsupported public key type") @@ -67,3 +89,17 @@ func LoadSignerVerifierFromPEMFile(path string, hashFunc crypto.Hash, pf cryptou } return LoadSignerVerifier(priv, hashFunc) } + +// LoadSignerVerifierFromPEMFileWithOpts returns a signature.SignerVerifier based on the algorithm of the private key +// in the file. The SignerVerifier will use the hash function specified in the options when computing digests. +func LoadSignerVerifierFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (SignerVerifier, error) { + fileBytes, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf) + if err != nil { + return nil, err + } + return LoadSignerVerifierWithOpts(priv, opts...) 
+}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
index 9ca604929..cdde9fc54 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
@@ -26,6 +26,7 @@ import (
 	"path/filepath"
 
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"github.com/sigstore/sigstore/pkg/signature/options"
 )
 
 // Verifier verifies the digital signature using a specified public key
@@ -40,12 +41,33 @@ type Verifier interface {
 // If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
 // RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly.
 func LoadVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (Verifier, error) {
+	return LoadVerifierWithOpts(publicKey, options.WithHash(hashFunc))
+}
+
+// LoadVerifierWithOpts returns a signature.Verifier based on the algorithm of the public key
+// provided that will use the hash function specified when computing digests.
+func LoadVerifierWithOpts(publicKey crypto.PublicKey, opts ...LoadOption) (Verifier, error) {
+	var rsaPSSOptions *rsa.PSSOptions
+	var useED25519ph bool
+	hashFunc := crypto.SHA256
+	for _, o := range opts {
+		o.ApplyED25519ph(&useED25519ph)
+		o.ApplyHash(&hashFunc)
+		o.ApplyRSAPSS(&rsaPSSOptions)
+	}
+
 	switch pk := publicKey.(type) {
 	case *rsa.PublicKey:
+		if rsaPSSOptions != nil {
+			return LoadRSAPSSVerifier(pk, hashFunc, rsaPSSOptions)
+		}
 		return LoadRSAPKCS1v15Verifier(pk, hashFunc)
 	case *ecdsa.PublicKey:
 		return LoadECDSAVerifier(pk, hashFunc)
 	case ed25519.PublicKey:
+		if useED25519ph {
+			return LoadED25519phVerifier(pk)
+		}
 		return LoadED25519Verifier(pk)
 	}
 	return nil, errors.New("unsupported public key type")
@@ -98,3 +120,19 @@ func LoadVerifierFromPEMFile(path string, hashFunc crypto.Hash) (Verifier, error
 
 	return LoadVerifier(pubKey, hashFunc)
 }
+
+// LoadVerifierFromPEMFileWithOpts returns a signature.Verifier based on the contents of a
+// file located at path. The Verifier will use the hash function specified in the options when computing digests.
+func LoadVerifierFromPEMFileWithOpts(path string, opts ...LoadOption) (Verifier, error) {
+	fileBytes, err := os.ReadFile(filepath.Clean(path))
+	if err != nil {
+		return nil, err
+	}
+
+	pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return LoadVerifierWithOpts(pubKey, opts...)
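Taken together, the LoadOption plumbing added across signer.go, signerverifier.go, and verifier.go lets a caller opt into RSA-PSS or pre-hashed Ed25519 at load time. A minimal usage sketch assembled only from functions introduced in this diff (the message contents are illustrative; error handling is kept terse):

package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// WithED25519ph selects the pre-hashed code path added in this change;
	// without it, LoadSignerVerifierWithOpts falls back to pure Ed25519.
	sv, err := signature.LoadSignerVerifierWithOpts(priv, options.WithED25519ph())
	if err != nil {
		log.Fatal(err)
	}

	msg := []byte("hello, world")
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		log.Fatal(err)
	}
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		log.Fatal(err)
	}
}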
+} diff --git a/vendor/github.com/vbauerster/mpb/v8/README.md b/vendor/github.com/vbauerster/mpb/v8/README.md index 09825ca08..af97c92a7 100644 --- a/vendor/github.com/vbauerster/mpb/v8/README.md +++ b/vendor/github.com/vbauerster/mpb/v8/README.md @@ -42,11 +42,9 @@ func main() { mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟"), mpb.PrependDecorators( // display our name with one space on the right - decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}), + decor.Name(name, decor.WC{C: decor.DindentRight | decor.DextraSpace}), // replace ETA decorator with "done" message, OnComplete event - decor.OnComplete( - decor.AverageETA(decor.ET_STYLE_GO, decor.WC{W: 4}), "done", - ), + decor.OnComplete(decor.AverageETA(decor.ET_STYLE_GO), "done"), ), mpb.AppendDecorators(decor.Percentage()), ) diff --git a/vendor/github.com/vbauerster/mpb/v8/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go index 5f8141210..bca798298 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -305,9 +305,6 @@ func (b *Bar) EwmaIncrBy(n int, iterDur time.Duration) { // EwmaIncrInt64 increments progress by amount of n and updates EWMA based // decorators by dur of a single iteration. func (b *Bar) EwmaIncrInt64(n int64, iterDur time.Duration) { - if n <= 0 { - return - } select { case b.operateState <- func(s *bState) { s.decoratorEwmaUpdate(n, iterDur) @@ -429,13 +426,11 @@ func (b *Bar) render(tw int) { return } } - frame := &renderFrame{ - rows: rows, - shutdown: s.shutdown, - rmOnComplete: s.rmOnComplete, - noPop: s.noPop, - } + frame := &renderFrame{rows: rows} if s.completed || s.aborted { + frame.shutdown = s.shutdown + frame.rmOnComplete = s.rmOnComplete + frame.noPop = s.noPop // post increment makes sure OnComplete decorators are rendered s.shutdown++ } @@ -460,12 +455,15 @@ func (b *Bar) triggerCompletion(s *bState) { } func (b *Bar) tryEarlyRefresh(renderReq chan<- time.Time) { - var anyOtherRunning bool + var otherRunning int b.container.traverseBars(func(bar *Bar) bool { - anyOtherRunning = b != bar && bar.IsRunning() - return anyOtherRunning + if b != bar && bar.IsRunning() { + otherRunning++ + return false // stop traverse + } + return true // continue traverse }) - if !anyOtherRunning { + if otherRunning == 0 { for { select { case renderReq <- time.Now(): diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go index f537d3f7a..31062ebd3 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go @@ -8,29 +8,27 @@ import ( ) const ( - // DidentRight bit specifies identation direction. + // DindentRight sets indentation from right to left. // - // |foo |b | With DidentRight - // | foo| b| Without DidentRight - DidentRight = 1 << iota + // |foo |b | DindentRight is set + // | foo| b| DindentRight is not set + DindentRight = 1 << iota - // DextraSpace bit adds extra space, makes sense with DSyncWidth only. - // When DidentRight bit set, the space will be added to the right, - // otherwise to the left. + // DextraSpace bit adds extra indentation space. DextraSpace // DSyncWidth bit enables same column width synchronization. // Effective with multiple bars only. 
DSyncWidth - // DSyncWidthR is shortcut for DSyncWidth|DidentRight - DSyncWidthR = DSyncWidth | DidentRight + // DSyncWidthR is shortcut for DSyncWidth|DindentRight + DSyncWidthR = DSyncWidth | DindentRight // DSyncSpace is shortcut for DSyncWidth|DextraSpace DSyncSpace = DSyncWidth | DextraSpace - // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DidentRight - DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight + // DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DindentRight + DSyncSpaceR = DSyncWidth | DextraSpace | DindentRight ) // TimeStyle enum. @@ -143,11 +141,10 @@ func (wc WC) Format(str string) (string, int) { viewWidth := runewidth.StringWidth(str) if wc.W > viewWidth { viewWidth = wc.W + } else if (wc.C & DextraSpace) != 0 { + viewWidth++ } if (wc.C & DSyncWidth) != 0 { - if (wc.C & DextraSpace) != 0 { - viewWidth++ - } wc.wsync <- viewWidth viewWidth = <-wc.wsync } @@ -156,7 +153,7 @@ func (wc WC) Format(str string) (string, int) { // Init initializes width related config. func (wc *WC) Init() WC { - if (wc.C & DidentRight) != 0 { + if (wc.C & DindentRight) != 0 { wc.fill = runewidth.FillRight } else { wc.fill = runewidth.FillLeft diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go index ecb6f8f92..ecf87b186 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -54,18 +54,19 @@ func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { func MovingAverageETA(style TimeStyle, average ewma.MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator { d := &movingAverageETA{ WC: initWC(wcc...), + producer: chooseTimeProducer(style), average: average, normalizer: normalizer, - producer: chooseTimeProducer(style), } return d } type movingAverageETA struct { WC + producer func(time.Duration) string average ewma.MovingAverage normalizer TimeNormalizer - producer func(time.Duration) string + zDur time.Duration } func (d *movingAverageETA) Decor(s Statistics) (string, int) { @@ -78,11 +79,17 @@ func (d *movingAverageETA) Decor(s Statistics) (string, int) { } func (d *movingAverageETA) EwmaUpdate(n int64, dur time.Duration) { - durPerItem := float64(dur) / float64(n) - if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) { - return + if n <= 0 { + d.zDur += dur + } else { + durPerItem := float64(d.zDur+dur) / float64(n) + if math.IsInf(durPerItem, 0) || math.IsNaN(durPerItem) { + d.zDur += dur + return + } + d.zDur = 0 + d.average.Add(durPerItem) } - d.average.Add(durPerItem) } // AverageETA decorator. It's wrapper of NewAverageETA. 
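Both EWMA decorators receive the same behavioral fix here: a zero-increment update no longer discards its duration. The elapsed time is banked in zDur and folded into the next positive sample, so stalled iterations still count toward the per-item average; the speed.go diff that follows applies the identical pattern. A self-contained sketch of that accumulation technique, with a plain mean standing in for ewma.MovingAverage (the real decorators also guard against Inf/NaN before calling Add):

package main

import (
	"fmt"
	"time"
)

// simpleAvg stands in for ewma.MovingAverage from github.com/VividCortex/ewma;
// an ordinary mean keeps the sketch self-contained.
type simpleAvg struct{ sum, n float64 }

func (a *simpleAvg) Add(v float64) { a.sum += v; a.n++ }

func (a *simpleAvg) Value() float64 {
	if a.n == 0 {
		return 0
	}
	return a.sum / a.n
}

// ewmaUpdate mirrors the pattern above: durations observed while n <= 0 are
// banked in *zDur and charged to the next positive increment.
func ewmaUpdate(avg *simpleAvg, zDur *time.Duration, n int64, dur time.Duration) {
	if n <= 0 {
		*zDur += dur
		return
	}
	avg.Add(float64(*zDur+dur) / float64(n))
	*zDur = 0
}

func main() {
	var (
		avg    simpleAvg
		banked time.Duration
	)
	ewmaUpdate(&avg, &banked, 0, 30*time.Millisecond) // stalled: banked, not dropped
	ewmaUpdate(&avg, &banked, 3, 60*time.Millisecond) // (30ms+60ms)/3 per item
	fmt.Println(time.Duration(avg.Value()))           // 30ms
}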
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go
index 5879d0604..f3355c425 100644
--- a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go
+++ b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go
@@ -69,8 +69,8 @@ func EwmaSpeed(unit interface{}, format string, age float64, wcc ...WC) Decorato
 func MovingAverageSpeed(unit interface{}, format string, average ewma.MovingAverage, wcc ...WC) Decorator {
 	d := &movingAverageSpeed{
 		WC:       initWC(wcc...),
-		average:  average,
 		producer: chooseSpeedProducer(unit, format),
+		average:  average,
 	}
 	return d
 }
@@ -79,26 +79,32 @@ type movingAverageSpeed struct {
 	WC
 	producer func(float64) string
 	average  ewma.MovingAverage
-	msg      string
+	zDur     time.Duration
 }
 
 func (d *movingAverageSpeed) Decor(s Statistics) (string, int) {
-	if !s.Completed {
-		var speed float64
-		if v := d.average.Value(); v > 0 {
-			speed = 1 / v
-		}
-		d.msg = d.producer(speed * 1e9)
+	var str string
+	// ewma implementation may return 0 before accumulating a certain number of samples
+	if v := d.average.Value(); v != 0 {
+		str = d.producer(1e9 / v)
+	} else {
+		str = d.producer(0)
 	}
-	return d.Format(d.msg)
+	return d.Format(str)
 }
 
 func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) {
-	durPerByte := float64(dur) / float64(n)
-	if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) {
-		return
+	if n <= 0 {
+		d.zDur += dur
+	} else {
+		durPerByte := float64(d.zDur+dur) / float64(n)
+		if math.IsInf(durPerByte, 0) || math.IsNaN(durPerByte) {
+			d.zDur += dur
+			return
+		}
+		d.zDur = 0
+		d.average.Add(durPerByte)
 	}
-	d.average.Add(durPerByte)
 }
 
 // AverageSpeed decorator with dynamic unit measure adjustment. It's wrapper of
 // NewAverageSpeed.
- err = e + renderReq = nil + operateState = nil + interceptIO = nil } case <-p.done: - update := make(chan bool) - for s.autoRefresh && err == nil { - s.hm.state(update) - if <-update { - err = render() - } else { - break - } - } if err != nil { _, _ = fmt.Fprintln(s.debugOut, err.Error()) + } else if s.autoRefresh { + update := make(chan bool) + for i := 0; i == 0 || <-update; i++ { + if err := s.render(w); err != nil { + _, _ = fmt.Fprintln(s.debugOut, err.Error()) + break + } + s.hm.state(update) + } } s.hm.end(s.shutdownNotifier) return @@ -293,10 +317,7 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) { } } -func (s pState) autoRefreshListener(done chan struct{}) { - if s.delayRC != nil { - <-s.delayRC - } +func (s *pState) autoRefreshListener(done chan struct{}) { ticker := time.NewTicker(s.refreshRate) defer ticker.Stop() for { @@ -310,7 +331,7 @@ func (s pState) autoRefreshListener(done chan struct{}) { } } -func (s pState) manualRefreshListener(done chan struct{}) { +func (s *pState) manualRefreshListener(done chan struct{}) { for { select { case x := <-s.manualRC: @@ -342,9 +363,9 @@ func (s *pState) render(cw *cwriter.Writer) (err error) { if s.reqWidth > 0 { width = s.reqWidth } else { - width = 100 + width = 80 } - height = 100 + height = width } for b := range iter { @@ -420,7 +441,7 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error { return cw.Flush(len(rows) - popCount) } -func (s pState) push(wg *sync.WaitGroup, b *Bar, sync bool) { +func (s *pState) push(wg *sync.WaitGroup, b *Bar, sync bool) { s.hm.push(b, sync) wg.Done() } diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go index 054467af3..f3533d344 100644 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -162,12 +162,17 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { return nil, ErrBucketNameRequired } + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if there is an existing key. - if bytes.Equal(key, k) { + if bytes.Equal(newKey, k) { if (flags & bucketLeafFlag) != 0 { return nil, ErrBucketExists } @@ -182,16 +187,14 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { } var value = bucket.write() - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) + c.node().put(newKey, newKey, value, 0, bucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket // to be treated as a regular, non-inline bucket for the rest of the tx. b.page = nil - return b.Bucket(key), nil + return b.Bucket(newKey), nil } // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. @@ -288,18 +291,23 @@ func (b *Bucket) Put(key []byte, value []byte) error { return ErrValueTooLarge } + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + // Move cursor to correct position. 
c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + if bytes.Equal(newKey, k) && (flags&bucketLeafFlag) != 0 { return ErrIncompatibleValue } - // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) + // gofail: var beforeBucketPut struct{} + + c.node().put(newKey, newKey, value, 0, 0) return nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go index 4e24f9eed..652aa48b8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go @@ -14,17 +14,22 @@ import ( ) // ArrayCodec is the Codec used for bsoncore.Array values. +// +// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0. type ArrayCodec struct{} var defaultArrayCodec = NewArrayCodec() // NewArrayCodec returns an ArrayCodec. +// +// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See +// [ArrayCodec] for more details. func NewArrayCodec() *ArrayCodec { return &ArrayCodec{} } // EncodeValue is the ValueEncoder for bsoncore.Array values. -func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCoreArray { return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} } @@ -34,7 +39,7 @@ func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val r } // DecodeValue is the ValueDecoder for bsoncore.Array values. -func (ac *ArrayCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCoreArray { return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go index 098ed69f9..0693bd432 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -23,6 +23,8 @@ var ( // Marshaler is an interface implemented by types that can marshal themselves // into a BSON document represented as bytes. The bytes returned must be a valid // BSON document if the error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. type Marshaler interface { MarshalBSON() ([]byte, error) } @@ -31,6 +33,8 @@ type Marshaler interface { // themselves into a BSON value as bytes. The type must be the valid type for // the bytes returned. The bytes and byte type together must be valid if the // error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. type ValueMarshaler interface { MarshalBSONValue() (bsontype.Type, []byte, error) } @@ -39,6 +43,8 @@ type ValueMarshaler interface { // document representation of themselves. The BSON bytes can be assumed to be // valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data // after returning. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. type Unmarshaler interface { UnmarshalBSON([]byte) error } @@ -47,6 +53,8 @@ type Unmarshaler interface { // BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. type ValueUnmarshaler interface { UnmarshalBSONValue(bsontype.Type, []byte) error } @@ -111,13 +119,93 @@ func (vde ValueDecoderError) Error() string { // value. type EncodeContext struct { *Registry + + // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) + // that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize instead. MinSize bool + + errorOnInlineDuplicates bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool +} + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. +func (ec *EncodeContext) ErrorOnInlineDuplicates() { + ec.errorOnInlineDuplicates = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprintf() instead of the default string conversion logic. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. +func (ec *EncodeContext) StringifyMapKeysWithFmt() { + ec.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. +func (ec *EncodeContext) NilMapAsEmpty() { + ec.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. +func (ec *EncodeContext) NilSliceAsEmpty() { + ec.nilSliceAsEmpty = true +} + +// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values +// instead of BSON null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. +func (ec *EncodeContext) NilByteSliceAsEmpty() { + ec.nilByteSliceAsEmpty = true +} + +// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) +// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. +// +// Note that the Encoder only examines exported struct fields when determining if a struct is the +// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. +func (ec *EncodeContext) OmitZeroStruct() { + ec.omitZeroStruct = true +} + +// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead. 
+func (ec *EncodeContext) UseJSONStructTags() {
+	ec.useJSONStructTags = true
+}
 
 // DecodeContext is the contextual information required for a Codec to decode a
 // value.
 type DecodeContext struct {
 	*Registry
+
+	// Truncate, if true, instructs decoders to truncate the fractional part of BSON "double"
+	// values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64,
+	// uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to
+	// BSON "decimal128" values.
+	//
+	// Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead.
 	Truncate bool
 
 	// Ancestor is the type of a containing document. This is mainly used to determine what type
@@ -125,7 +213,7 @@ type DecodeContext struct {
 	// Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
 	// will be decoded into a bson.M.
 	//
-	// Deprecated: Use DefaultDocumentM or DefaultDocumentD instead.
+	// Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead.
 	Ancestor reflect.Type
 
 	// defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the
@@ -133,22 +221,74 @@ type DecodeContext struct {
 	// set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an
 	// error. DocumentType overrides the Ancestor field.
 	defaultDocumentType reflect.Type
+
+	binaryAsSlice     bool
+	useJSONStructTags bool
+	useLocalTimeZone  bool
+	zeroMaps          bool
+	zeroStructs       bool
 }
 
-// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as
-// "interface{}" or "map[string]interface{}".
+// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or
+// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
+func (dc *DecodeContext) BinaryAsSlice() {
+	dc.binaryAsSlice = true
+}
+
+// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson"
+// struct tag is not specified.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
+func (dc *DecodeContext) UseJSONStructTags() {
+	dc.useJSONStructTags = true
+}
+
+// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead
+// of the UTC timezone.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
+func (dc *DecodeContext) UseLocalTimeZone() {
+	dc.useLocalTimeZone = true
+}
+
+// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value
+// passed to Decode before unmarshaling BSON documents into them.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
+func (dc *DecodeContext) ZeroMaps() {
+	dc.zeroMaps = true
+}
+
+// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination
+// value passed to Decode before unmarshaling BSON documents into them.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
+func (dc *DecodeContext) ZeroStructs() {
+	dc.zeroStructs = true
+}
+
+// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead.
 func (dc *DecodeContext) DefaultDocumentM() {
 	dc.defaultDocumentType = reflect.TypeOf(primitive.M{})
 }
 
-// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as
-// "interface{}" or "map[string]interface{}".
+// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead.
 func (dc *DecodeContext) DefaultDocumentD() {
 	dc.defaultDocumentType = reflect.TypeOf(primitive.D{})
 }
 
-// ValueCodec is the interface that groups the methods to encode and decode
-// values.
+// ValueCodec is an interface for encoding and decoding a reflect.Value.
+//
+// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead.
 type ValueCodec interface {
 	ValueEncoder
 	ValueDecoder
@@ -233,6 +373,10 @@ func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext
 
 // CodecZeroer is the interface implemented by Codecs that can also determine if
 // a value of the type that would be encoded is zero.
+//
+// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver
+// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to
+// nil instead.
 type CodecZeroer interface {
 	IsTypeZero(interface{}) bool
 }
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
index 5a916cc15..0134b5a94 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
@@ -16,18 +16,45 @@ import (
 )
 
 // ByteSliceCodec is the Codec used for []byte values.
+//
+// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver
+// 2.0. To configure the byte slice encode and decode behavior, use the
+// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
+// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the byte slice
+// encode and decode behavior for a mongo.Client, use
+// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
+//
+// For example, to configure a mongo.Client to encode nil byte slices as empty
+// BSON binary values, use:
+//
+//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
+//		NilByteSliceAsEmpty: true,
+//	})
+//
+// See the deprecation notice for each field in ByteSliceCodec for the
+// corresponding settings.
 type ByteSliceCodec struct {
+	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values
+	// instead of BSON null.
+	//
+	// Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty
+	// instead.
 	EncodeNilAsEmpty bool
 }
 
 var (
 	defaultByteSliceCodec = NewByteSliceCodec()
 
-	_ ValueCodec  = defaultByteSliceCodec
+	// Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be
+	// used by collection type decoders (e.g. map, slice, etc) to set individual values in a
+	// collection.
 	_ typeDecoder = defaultByteSliceCodec
 )
 
-// NewByteSliceCodec returns a StringCodec with options opts.
+// NewByteSliceCodec returns a ByteSliceCodec with options opts.
+// +// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See +// [ByteSliceCodec] for more details. func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) codec := ByteSliceCodec{} @@ -42,13 +69,13 @@ func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, if !val.IsValid() || val.Type() != tByteSlice { return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} } - if val.IsNil() && !bsc.EncodeNilAsEmpty { + if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { return vw.WriteNull() } return vw.WriteBinary(val.Interface().([]byte)) } -func (bsc *ByteSliceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tByteSlice { return emptyValue, ValueDecoderError{ Name: "ByteSliceDecodeValue", diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go new file mode 100644 index 000000000..844b50299 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go @@ -0,0 +1,166 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// Runtime check that the kind encoder and decoder caches can store any valid +// reflect.Kind constant. 
+func init() { + if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { + panic("The capacity of kindEncoderCache is too small.\n" + + "This is due to a new type being added to reflect.Kind.") + } +} + +// statically assert array size +var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] +var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] + +type typeEncoderCache struct { + cache sync.Map // map[reflect.Type]ValueEncoder +} + +func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { + c.cache.Store(rt, enc) +} + +func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueEncoder), true + } + return nil, false +} + +func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { + if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { + enc = v.(ValueEncoder) + } + return enc +} + +func (c *typeEncoderCache) Clone() *typeEncoderCache { + cc := new(typeEncoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +type typeDecoderCache struct { + cache sync.Map // map[reflect.Type]ValueDecoder +} + +func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { + c.cache.Store(rt, dec) +} + +func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueDecoder), true + } + return nil, false +} + +func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { + if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { + dec = v.(ValueDecoder) + } + return dec +} + +func (c *typeDecoderCache) Clone() *typeDecoderCache { + cc := new(typeDecoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueEncoder interface). +type kindEncoderCacheEntry struct { + enc ValueEncoder +} + +type kindEncoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry +} + +func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) { + if enc != nil && rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc}) + } +} + +func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok { + return ent.enc, ent.enc != nil + } + } + return nil, false +} + +func (c *kindEncoderCache) Clone() *kindEncoderCache { + cc := new(kindEncoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueDecoder interface). 
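The wrapper-entry comments above describe a real constraint worth a standalone illustration: atomic.Value panics if successive Store calls use different concrete types, which distinct ValueEncoder implementations would be. A minimal sketch with invented stand-in types:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Two stand-in encoder types (hypothetical, for the demonstration only).
type encA struct{}
type encB struct{}

// entry plays the role of kindEncoderCacheEntry: every Store sees the same
// concrete type (*entry), whatever it wraps.
type entry struct{ enc interface{} }

func main() {
	var direct atomic.Value
	direct.Store(encA{})
	// direct.Store(encB{}) // panics: "sync/atomic: store of inconsistently typed value into Value"

	var wrapped atomic.Value
	wrapped.Store(&entry{enc: encA{}})
	wrapped.Store(&entry{enc: encB{}}) // fine: the concrete type is always *entry
	fmt.Printf("%T\n", wrapped.Load().(*entry).enc) // main.encB
}
```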
+type kindDecoderCacheEntry struct { + dec ValueDecoder +} + +type kindDecoderCache struct { + entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry +} + +func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) { + if rt < reflect.Kind(len(c.entries)) { + c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec}) + } +} + +func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) { + if rt < reflect.Kind(len(c.entries)) { + if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok { + return ent.dec, ent.dec != nil + } + } + return nil, false +} + +func (c *kindDecoderCache) Clone() *kindDecoderCache { + cc := new(kindDecoderCache) + for i, v := range c.entries { + if val := v.Load(); val != nil { + cc.entries[i].Store(val) + } + } + return cc +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index e95cab585..7e08aab35 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -24,7 +24,7 @@ import ( var ( defaultValueDecoders DefaultValueDecoders - errCannotTruncate = errors.New("float64 can only be truncated to an integer type when truncation is enabled") + errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled") ) type decodeBinaryError struct { @@ -41,13 +41,16 @@ func newDefaultStructCodec() *StructCodec { if err != nil { // This function is called from the codec registration path, so errors can't be propagated. If there's an error // constructing the StructCodec, we panic to avoid losing it. - panic(fmt.Errorf("error creating default StructCodec: %v", err)) + panic(fmt.Errorf("error creating default StructCodec: %w", err)) } return codec } // DefaultValueDecoders is a namespace type for the default ValueDecoders used // when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. type DefaultValueDecoders struct{} // RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with @@ -56,6 +59,9 @@ type DefaultValueDecoders struct{} // There is no support for decoding map[string]interface{} because there is no decoder for // interface{}, so users must either register this decoder themselves or use the // EmptyInterfaceDecoder available in the bson package. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) @@ -132,6 +138,9 @@ func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { } // DDecodeValue is the ValueDecoderFunc for primitive.D instances. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || !val.CanSet() || val.Type() != tD { return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} @@ -169,7 +178,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe for { key, elemVr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } else if err != nil { return err @@ -188,7 +197,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe return nil } -func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t.Kind() != reflect.Bool { return emptyValue, ValueDecoderError{ Name: "BooleanDecodeValue", @@ -235,6 +244,9 @@ func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw. } // BooleanDecodeValue is the ValueDecoderFunc for bool types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} @@ -333,6 +345,9 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade } // IntDecodeValue is the ValueDecoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() { return ValueDecoderError{ @@ -434,7 +449,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu return nil } -func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { var f float64 var err error switch vrType := vr.Type(); vrType { @@ -477,7 +492,7 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu switch t.Kind() { case reflect.Float32: - if !ec.Truncate && float64(float32(f)) != f { + if !dc.Truncate && float64(float32(f)) != f { return emptyValue, errCannotTruncate } @@ -494,6 +509,9 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu } // FloatDecodeValue is the ValueDecoderFunc for float types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() { return ValueDecoderError{ @@ -515,7 +533,7 @@ func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.Val // StringDecodeValue is the ValueDecoderFunc for string types. // // Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. 
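A change repeated throughout these hunks swaps `err == bsonrw.ErrEOD` for `errors.Is(err, bsonrw.ErrEOD)`, and `%v` for `%w` in fmt.Errorf. The two go together: `%w` keeps the sentinel in the error chain, and errors.Is is what still matches it through the wrapping. A self-contained sketch with a stand-in sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

var errEOD = errors.New("end of document")

func main() {
	// %w (rather than %v) keeps the sentinel reachable in the error chain.
	wrapped := fmt.Errorf("reading element: %w", errEOD)

	fmt.Println(wrapped == errEOD)          // false: equality misses wrapped errors
	fmt.Println(errors.Is(wrapped, errEOD)) // true: Is walks the Unwrap chain
}
```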
-func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { var str string var err error switch vr.Type() { @@ -536,7 +554,7 @@ func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tJavaScript { return emptyValue, ValueDecoderError{ Name: "JavaScriptDecodeValue", @@ -565,6 +583,9 @@ func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.V } // JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tJavaScript { return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} @@ -579,7 +600,7 @@ func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bso return nil } -func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tSymbol { return emptyValue, ValueDecoderError{ Name: "SymbolDecodeValue", @@ -620,6 +641,9 @@ func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.Value } // SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tSymbol { return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} @@ -634,7 +658,7 @@ func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tBinary { return emptyValue, ValueDecoderError{ Name: "BinaryDecodeValue", @@ -664,6 +688,9 @@ func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // BinaryDecodeValue is the ValueDecoderFunc for Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tBinary { return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} @@ -678,7 +705,7 @@ func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tUndefined { return emptyValue, ValueDecoderError{ Name: "UndefinedDecodeValue", @@ -704,6 +731,9 @@ func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.Valu } // UndefinedDecodeValue is the ValueDecoderFunc for Undefined. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tUndefined { return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} @@ -719,7 +749,7 @@ func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw } // Accept both 12-byte string and pretty-printed 24-byte hex string formats. -func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tOID { return emptyValue, ValueDecoderError{ Name: "ObjectIDDecodeValue", @@ -765,6 +795,9 @@ func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.V } // ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tOID { return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} @@ -779,7 +812,7 @@ func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw. return nil } -func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDateTime { return emptyValue, ValueDecoderError{ Name: "DateTimeDecodeValue", @@ -808,6 +841,9 @@ func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.Value } // DateTimeDecodeValue is the ValueDecoderFunc for DateTime. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDateTime { return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} @@ -822,7 +858,7 @@ func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw. 
return nil } -func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tNull { return emptyValue, ValueDecoderError{ Name: "NullDecodeValue", @@ -848,6 +884,9 @@ func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueRead } // NullDecodeValue is the ValueDecoderFunc for Null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tNull { return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} @@ -862,7 +901,7 @@ func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.Valu return nil } -func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tRegex { return emptyValue, ValueDecoderError{ Name: "RegexDecodeValue", @@ -891,6 +930,9 @@ func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueRea } // RegexDecodeValue is the ValueDecoderFunc for Regex. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRegex { return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} @@ -905,7 +947,7 @@ func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.Val return nil } -func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDBPointer { return emptyValue, ValueDecoderError{ Name: "DBPointerDecodeValue", @@ -935,6 +977,9 @@ func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.Valu } // DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDBPointer { return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} @@ -949,7 +994,7 @@ func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw return nil } -func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { if reflectType != tTimestamp { return emptyValue, ValueDecoderError{ Name: "TimestampDecodeValue", @@ -978,6 +1023,9 @@ func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.Valu } // TimestampDecodeValue is the ValueDecoderFunc for Timestamp. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tTimestamp { return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} @@ -992,7 +1040,7 @@ func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw return nil } -func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tMinKey { return emptyValue, ValueDecoderError{ Name: "MinKeyDecodeValue", @@ -1020,6 +1068,9 @@ func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // MinKeyDecodeValue is the ValueDecoderFunc for MinKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tMinKey { return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} @@ -1034,7 +1085,7 @@ func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tMaxKey { return emptyValue, ValueDecoderError{ Name: "MaxKeyDecodeValue", @@ -1062,6 +1113,9 @@ func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe } // MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tMaxKey { return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} @@ -1076,7 +1130,7 @@ func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.Va return nil } -func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tDecimal { return emptyValue, ValueDecoderError{ Name: "Decimal128DecodeValue", @@ -1105,6 +1159,9 @@ func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bson } // Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tDecimal { return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} @@ -1119,7 +1176,7 @@ func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bso return nil } -func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tJSONNumber { return emptyValue, ValueDecoderError{ Name: "JSONNumberDecodeValue", @@ -1164,6 +1221,9 @@ func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw } // JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tJSONNumber { return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} @@ -1178,7 +1238,7 @@ func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonr return nil } -func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t != tURL { return emptyValue, ValueDecoderError{ Name: "URLDecodeValue", @@ -1213,6 +1273,9 @@ func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueR } // URLDecodeValue is the ValueDecoderFunc for url.URL. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tURL { return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} @@ -1230,7 +1293,7 @@ func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.Value // TimeDecodeValue is the ValueDecoderFunc for time.Time. 
// // Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if vr.Type() != bsontype.DateTime { return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) } @@ -1251,7 +1314,7 @@ func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.Valu // ByteSliceDecodeValue is the ValueDecoderFunc for []byte. // // Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) } @@ -1316,7 +1379,7 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value keyType := val.Type().Key() for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -1336,6 +1399,9 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value } // ArrayDecodeValue is the ValueDecoderFunc for array types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Array { return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} @@ -1447,7 +1513,10 @@ func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.Val } // ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. -func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } @@ -1471,16 +1540,19 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr return err } - fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) + m, ok := val.Interface().(ValueUnmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } - return nil + return m.UnmarshalBSONValue(t, src) } // UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. 
-func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } @@ -1516,12 +1588,12 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. } - fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") - errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] - if !errVal.IsNil() { - return errVal.Interface().(error) + m, ok := val.Interface().(Unmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } - return nil + return m.UnmarshalBSON(src) } // EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. @@ -1565,7 +1637,10 @@ func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr b } // CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. -func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCoreDocument { return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} } @@ -1600,7 +1675,7 @@ func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueR idx := 0 for { vr, err := ar.ReadValue() - if err == bsonrw.ErrEOA { + if errors.Is(err, bsonrw.ErrEOA) { break } if err != nil { @@ -1671,6 +1746,9 @@ func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bso } // CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
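The ValueUnmarshalerDecodeValue and UnmarshalerDecodeValue rewrites above replace a reflective MethodByName lookup and Call with a plain interface assertion; the ValueMarshaler, Marshaler, and Proxy encoder hunks later in this diff make the same move. A sketch contrasting the two call paths on an invented toy Unmarshaler:

```go
package main

import (
	"fmt"
	"reflect"
)

type Unmarshaler interface{ UnmarshalBSON([]byte) error }

// doc is a toy Unmarshaler; the 5-byte input below is an empty BSON document.
type doc struct{ raw []byte }

func (d *doc) UnmarshalBSON(b []byte) error { d.raw = b; return nil }

func main() {
	val := reflect.ValueOf(&doc{})
	empty := []byte{5, 0, 0, 0, 0}

	// Old path: dynamic method lookup and a reflective call.
	ret := val.MethodByName("UnmarshalBSON").Call([]reflect.Value{reflect.ValueOf(empty)})
	fmt.Println(ret[0].IsNil()) // true: no error

	// New path: one type assertion, cheaper and statically checked.
	if m, ok := val.Interface().(Unmarshaler); ok {
		fmt.Println(m.UnmarshalBSON(empty)) // <nil>
	}
}
```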
func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tCodeWithScope { return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} @@ -1709,7 +1787,7 @@ func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr b elems := make([]reflect.Value, 0) for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 6bdb43cb4..4751ae995 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -58,10 +58,16 @@ func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) er // DefaultValueEncoders is a namespace type for the default ValueEncoders used // when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. type DefaultValueEncoders struct{} // RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with // the provided RegistryBuilder. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) @@ -113,7 +119,10 @@ func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { } // BooleanEncodeValue is the ValueEncoderFunc for bool types. -func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Bool { return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} } @@ -125,6 +134,9 @@ func fitsIn32Bits(i int64) bool { } // IntEncodeValue is the ValueEncoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { switch val.Kind() { case reflect.Int8, reflect.Int16, reflect.Int32: @@ -176,7 +188,10 @@ func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.Valu } // FloatEncodeValue is the ValueEncoderFunc for float types. -func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { switch val.Kind() { case reflect.Float32, reflect.Float64: return vw.WriteDouble(val.Float()) @@ -188,7 +203,7 @@ func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.Val // StringEncodeValue is the ValueEncoderFunc for string types. // // Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. -func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if val.Kind() != reflect.String { return ValueEncoderError{ Name: "StringEncodeValue", @@ -201,7 +216,10 @@ func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw. } // ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. -func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tOID { return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} } @@ -209,7 +227,10 @@ func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw. } // Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. -func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDecimal { return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} } @@ -217,6 +238,9 @@ func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonr } // JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tJSONNumber { return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} @@ -237,7 +261,10 @@ func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonr } // URLEncodeValue is the ValueEncoderFunc for url.URL. -func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
 	if !val.IsValid() || val.Type() != tURL {
 		return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
 	}
@@ -248,7 +275,7 @@ func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.Value
 // TimeEncodeValue is the ValueEncoderFunc for time.Time.
 //
 // Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
-func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
 	if !val.IsValid() || val.Type() != tTime {
 		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
 	}
@@ -260,7 +287,7 @@ func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.Valu
 // ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
 //
 // Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
-func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
 	if !val.IsValid() || val.Type() != tByteSlice {
 		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
 	}
@@ -316,7 +343,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum
 		}
 
 		currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key))
-		if lookupErr != nil && lookupErr != errInvalidValue {
+		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
 			return lookupErr
 		}
 
@@ -325,7 +352,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum
 			return err
 		}
 
-		if lookupErr == errInvalidValue {
+		if errors.Is(lookupErr, errInvalidValue) {
 			err = vw.WriteNull()
 			if err != nil {
 				return err
@@ -343,6 +370,9 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum
 }
 
 // ArrayEncodeValue is the ValueEncoderFunc for array types.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Array { return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} @@ -388,7 +418,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -397,7 +427,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -457,7 +487,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -466,7 +496,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -515,7 +545,10 @@ func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw b } // ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. -func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement ValueMarshaler switch { case !val.IsValid(): @@ -531,17 +564,22 @@ func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw b return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} } - fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue") - returns := fn.Call(nil) - if !returns[2].IsNil() { - return returns[2].Interface().(error) + m, ok := val.Interface().(ValueMarshaler) + if !ok { + return vw.WriteNull() + } + t, data, err := m.MarshalBSONValue() + if err != nil { + return err } - t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte) return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) } // MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. -func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement Marshaler switch { case !val.IsValid(): @@ -557,16 +595,21 @@ func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} } - fn := val.Convert(tMarshaler).MethodByName("MarshalBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Marshaler) + if !ok { + return vw.WriteNull() + } + data, err := m.MarshalBSON() + if err != nil { + return err } - data := returns[0].Interface().([]byte) return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) } // ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { // Either val or a pointer to val must implement Proxy switch { @@ -583,27 +626,38 @@ func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.Val return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} } - fn := val.Convert(tProxy).MethodByName("ProxyBSON") - returns := fn.Call(nil) - if !returns[1].IsNil() { - return returns[1].Interface().(error) + m, ok := val.Interface().(Proxy) + if !ok { + return vw.WriteNull() } - data := returns[0] - var encoder ValueEncoder - var err error - if data.Elem().IsValid() { - encoder, err = ec.LookupEncoder(data.Elem().Type()) - } else { - encoder, err = ec.LookupEncoder(nil) + v, err := m.ProxyBSON() + if err != nil { + return err } + if v == nil { + encoder, err := ec.LookupEncoder(nil) + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) + } + vv := reflect.ValueOf(v) + switch vv.Kind() { + case reflect.Ptr, reflect.Interface: + vv = vv.Elem() + } + encoder, err := ec.LookupEncoder(vv.Type()) if err != nil { return err } - return encoder.EncodeValue(ec, vw, data.Elem()) + return encoder.EncodeValue(ec, vw, vv) } // JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. -func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tJavaScript { return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} } @@ -612,7 +666,10 @@ func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw. } // SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. -func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
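The ProxyEncodeValue rewrite above keeps the hook's behavior (encode whatever ProxyBSON returns) while dropping the reflective call. A sketch of the hook from the caller's side, assuming the default registry's driver-defined Proxy hook described in the doc.go hunks below; the celsius type is invented for illustration:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// celsius marshals as a plain BSON double by proxying itself.
type celsius float64

// ProxyBSON satisfies bsoncodec.Proxy: the returned value is encoded in
// place of the original one.
func (c celsius) ProxyBSON() (interface{}, error) { return float64(c), nil }

func main() {
	out, err := bson.Marshal(bson.D{{Key: "temp", Value: celsius(21.5)}})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(out)) // {"temp": {"$numberDouble":"21.5"}}
}
```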
+func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tSymbol { return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} } @@ -621,7 +678,10 @@ func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.Valu } // BinaryEncodeValue is the ValueEncoderFunc for Binary. -func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tBinary { return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} } @@ -631,7 +691,10 @@ func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // UndefinedEncodeValue is the ValueEncoderFunc for Undefined. -func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tUndefined { return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} } @@ -640,7 +703,10 @@ func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.Val } // DateTimeEncodeValue is the ValueEncoderFunc for DateTime. -func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDateTime { return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} } @@ -649,7 +715,10 @@ func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.Valu } // NullEncodeValue is the ValueEncoderFunc for Null. -func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tNull { return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} } @@ -658,7 +727,10 @@ func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWri } // RegexEncodeValue is the ValueEncoderFunc for Regex. -func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRegex { return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} } @@ -669,7 +741,10 @@ func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWr } // DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. -func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tDBPointer { return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} } @@ -680,7 +755,10 @@ func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.Val } // TimestampEncodeValue is the ValueEncoderFunc for Timestamp. -func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTimestamp { return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} } @@ -691,7 +769,10 @@ func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.Val } // MinKeyEncodeValue is the ValueEncoderFunc for MinKey. -func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tMinKey { return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} } @@ -700,7 +781,10 @@ func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. -func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tMaxKey { return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} } @@ -709,7 +793,10 @@ func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW } // CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. -func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCoreDocument { return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} } @@ -720,6 +807,9 @@ func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw. } // CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tCodeWithScope { return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go index 5f903ebea..4613e5a1e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -31,35 +31,39 @@ // allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext // instance is provided and serves similar functionality to the EncodeContext. // -// # Registry and RegistryBuilder -// -// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type -// documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a -// RegistryBuilder, which handles three main types of codecs: -// -// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and RegisterTypeDecoder methods. -// The registered codec will be invoked when encoding/decoding a value whose type matches the registered type exactly. -// If the registered type is an interface, the codec will be invoked when encoding or decoding values whose type is the -// interface, but not for values with concrete types that implement the interface. -// -// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and RegisterHookDecoder methods. -// These methods only accept interface types and the registered codecs will be invoked when encoding or decoding values -// whose types implement the interface. An example of a hook defined by the driver is bson.Marshaler. The driver will -// call the MarshalBSON method for any value whose type implements bson.Marshaler, regardless of the value's concrete -// type. -// -// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type associations are used when -// decoding into a bson.D/bson.M or a struct field of type interface{}. For example, by default, BSON int32 and int64 -// values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would -// change the behavior so these values decode as Go int instances instead: +// # Registry +// +// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. See the Registry type +// documentation for examples of registering various custom encoders and decoders. A Registry can +// have three main types of codecs: +// +// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and +// RegisterTypeDecoder methods. 
The registered codec will be invoked when encoding/decoding a value +// whose type matches the registered type exactly. +// If the registered type is an interface, the codec will be invoked when encoding or decoding +// values whose type is the interface, but not for values with concrete types that implement the +// interface. +// +// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and +// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs +// will be invoked when encoding or decoding values whose types implement the interface. An example +// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method +// for any value whose type implements bson.Marshaler, regardless of the value's concrete type. +// +// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type +// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}. +// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances, +// respectively, when decoding into a bson.D. The following code would change the behavior so these +// values decode as Go int instances instead: // // intType := reflect.TypeOf(int(0)) -// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) // -// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder -// methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the -// registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. -// These methods should be used to change the behavior for all values for a specific kind. +// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and +// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding +// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't +// match a registered type or hook encoder/decoder first. These methods should be used to change the +// behavior for all values for a specific kind. // // # Registry Lookup Procedure // @@ -67,17 +71,18 @@ // // 1. A type encoder registered for the exact type of the value. // -// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to the value. If the -// value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and bsoncodec.ValueMarshaler), the first -// one registered will be selected. Note that registries constructed using bson.NewRegistryBuilder have driver-defined -// hooks registered for the bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those -// will take precedence over any new hooks. +// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to +// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and +// bsoncodec.ValueMarshaler), the first one registered will be selected. 
Note that registries +// constructed using bson.NewRegistry have driver-defined hooks registered for the +// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take +// precedence over any new hooks. // // 3. A kind encoder registered for the value's kind. // -// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The same precedence -// rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is -// found. +// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The +// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder +// will be returned if no decoder is found. // // # DefaultValueEncoders and DefaultValueDecoders // diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index eda417cff..098368f07 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -16,18 +16,44 @@ import ( ) // EmptyInterfaceCodec is the Codec used for interface{} values. +// +// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go +// Driver 2.0. To configure the empty interface encode and decode behavior, use +// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to unmarshal BSON binary field +// values as a Go byte slice, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// BinaryAsSlice: true, +// }) +// +// See the deprecation notice for each field in EmptyInterfaceCodec for the +// corresponding settings. type EmptyInterfaceCodec struct { + // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the + // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. + // + // Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead. DecodeBinaryAsSlice bool } var ( defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec() - _ ValueCodec = defaultEmptyInterfaceCodec + // Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it + // to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultEmptyInterfaceCodec ) // NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. +// +// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See +// [EmptyInterfaceCodec] for more details. func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) 
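The BinaryAsSlice option referenced in the new EmptyInterfaceCodec comment is applied once, on the client. A minimal sketch of that, assuming mongo-driver v1.12+; the URI is a placeholder:

package main

import (
	"context"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
	// BinaryAsSlice makes "Generic"/"Old" BSON binary values decode into
	// []byte instead of primitive.Binary when the target is interface{}.
	opts := options.Client().
		ApplyURI("mongodb://localhost:27017"). // placeholder URI
		SetBSONOptions(&options.BSONOptions{BinaryAsSlice: true})

	client, err := mongo.Connect(context.Background(), opts)
	if err != nil {
		panic(err)
	}
	defer func() { _ = client.Disconnect(context.Background()) }()
}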
@@ -121,7 +147,7 @@ func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReade return emptyValue, err } - if eic.DecodeBinaryAsSlice && rtype == tBinary { + if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary { binElem := elem.Interface().(primitive.Binary) if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { elem = reflect.ValueOf(binElem.Data) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index e1fbef9c6..d7e00ffa8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -8,6 +8,7 @@ package bsoncodec import ( "encoding" + "errors" "fmt" "reflect" "strconv" @@ -20,14 +21,44 @@ import ( var defaultMapCodec = NewMapCodec() // MapCodec is the Codec used for map values. +// +// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To +// configure the map encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON +// documents, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilMapAsEmpty: true, +// }) +// +// See the deprecation notice for each field in MapCodec for the corresponding +// settings. type MapCodec struct { - DecodeZerosMap bool - EncodeNilAsEmpty bool + // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination + // value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead. + DecodeZerosMap bool + + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of + // BSON null. + // + // Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead. + EncodeNilAsEmpty bool + + // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name + // strings using fmt.Sprintf() instead of the default string conversion logic. + // + // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or + // options.BSONOptions.StringifyMapKeysWithFmt instead. EncodeKeysWithStringer bool } -var _ ValueCodec = &MapCodec{} - // KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. // This applies to types used as map keys and is similar to encoding.TextMarshaler. type KeyMarshaler interface { @@ -45,6 +76,9 @@ type KeyUnmarshaler interface { } // NewMapCodec returns a MapCodec with options opts. +// +// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See +// [MapCodec] for more details. func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { mapOpt := bsonoptions.MergeMapCodecOptions(opts...) 
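MapCodec.EncodeNilAsEmpty maps onto the per-Encoder switch named in the new field comments. A minimal sketch of setting it on a standalone bson.Encoder, assuming mongo-driver v1.12+:

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	// Per-Encoder replacement for MapCodec.EncodeNilAsEmpty: the nil map
	// below marshals as {} instead of BSON null.
	enc.NilMapAsEmpty()

	if err := enc.Encode(struct{ Tags map[string]string }{}); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf.Bytes())
}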
@@ -67,7 +101,7 @@ func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val ref return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} } - if val.IsNil() && !mc.EncodeNilAsEmpty { + if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty { // If we have a nil map but we can't WriteNull, that means we're probably trying to encode // to a TopLevel document. We can't currently tell if this is what actually happened, but if // there's a deeper underlying problem, the error will also be returned from WriteDocument, @@ -100,7 +134,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v keys := val.MapKeys() for _, key := range keys { - keyStr, err := mc.encodeKey(key) + keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt) if err != nil { return err } @@ -110,7 +144,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v } currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -119,7 +153,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -163,7 +197,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref val.Set(reflect.MakeMap(val.Type())) } - if val.Len() > 0 && mc.DecodeZerosMap { + if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) { clearMap(val) } @@ -182,7 +216,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -211,8 +245,8 @@ func clearMap(m reflect.Value) { } } -func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { - if mc.EncodeKeysWithStringer { +func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) { + if mc.EncodeKeysWithStringer || encodeKeysWithStringer { return fmt.Sprint(val), nil } @@ -295,7 +329,7 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, if mc.EncodeKeysWithStringer { parsed, err := strconv.ParseFloat(key, 64) if err != nil { - return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err) + return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err) } keyVal = reflect.ValueOf(parsed) break diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go index 616a3e701..ddfa4a33e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -8,7 +8,6 @@ package bsoncodec import ( "reflect" - "sync" "go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsontype" @@ -18,18 +17,28 @@ var _ ValueEncoder = &PointerCodec{} var _ ValueDecoder = &PointerCodec{} // PointerCodec is the Codec used for pointers. +// +// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. 
To +// override the default pointer encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for pointers. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder) +// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder) type PointerCodec struct { - ecache map[reflect.Type]ValueEncoder - dcache map[reflect.Type]ValueDecoder - l sync.RWMutex + ecache typeEncoderCache + dcache typeDecoderCache } // NewPointerCodec returns a PointerCodec that has been initialized. +// +// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See +// [PointerCodec] for more details. func NewPointerCodec() *PointerCodec { - return &PointerCodec{ - ecache: make(map[reflect.Type]ValueEncoder), - dcache: make(map[reflect.Type]ValueDecoder), - } + return &PointerCodec{} } // EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil @@ -46,24 +55,19 @@ func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val return vw.WriteNull() } - pc.l.RLock() - enc, ok := pc.ecache[val.Type()] - pc.l.RUnlock() - if ok { - if enc == nil { - return ErrNoEncoder{Type: val.Type()} + typ := val.Type() + if v, ok := pc.ecache.Load(typ); ok { + if v == nil { + return ErrNoEncoder{Type: typ} } - return enc.EncodeValue(ec, vw, val.Elem()) + return v.EncodeValue(ec, vw, val.Elem()) } - - enc, err := ec.LookupEncoder(val.Type().Elem()) - pc.l.Lock() - pc.ecache[val.Type()] = enc - pc.l.Unlock() + // TODO(charlie): handle concurrent requests for the same type + enc, err := ec.LookupEncoder(typ.Elem()) + enc = pc.ecache.LoadOrStore(typ, enc) if err != nil { return err } - return enc.EncodeValue(ec, vw, val.Elem()) } @@ -74,36 +78,31 @@ func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} } + typ := val.Type() if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) + val.Set(reflect.Zero(typ)) return vr.ReadNull() } if vr.Type() == bsontype.Undefined { - val.Set(reflect.Zero(val.Type())) + val.Set(reflect.Zero(typ)) return vr.ReadUndefined() } if val.IsNil() { - val.Set(reflect.New(val.Type().Elem())) + val.Set(reflect.New(typ.Elem())) } - pc.l.RLock() - dec, ok := pc.dcache[val.Type()] - pc.l.RUnlock() - if ok { - if dec == nil { - return ErrNoDecoder{Type: val.Type()} + if v, ok := pc.dcache.Load(typ); ok { + if v == nil { + return ErrNoDecoder{Type: typ} } - return dec.DecodeValue(dc, vr, val.Elem()) + return v.DecodeValue(dc, vr, val.Elem()) } - - dec, err := dc.LookupDecoder(val.Type().Elem()) - pc.l.Lock() - pc.dcache[val.Type()] = dec - pc.l.Unlock() + // TODO(charlie): handle concurrent requests for the same type + dec, err := dc.LookupDecoder(typ.Elem()) + dec = pc.dcache.LoadOrStore(typ, dec) if err != nil { return err } - return dec.DecodeValue(dc, vr, val.Elem()) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 80644023c..196c491bb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -16,12 +16,18 @@ import ( ) // ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder. +// +// Deprecated: ErrNilType will not be supported in Go Driver 2.0. 
var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>") // ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder. +// +// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0. var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder") // ErrNoEncoder is returned when there wasn't an encoder available for a type. +// +// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0. type ErrNoEncoder struct { Type reflect.Type } @@ -34,6 +40,8 @@ func (ene ErrNoEncoder) Error() string { } // ErrNoDecoder is returned when there wasn't a decoder available for a type. +// +// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0. type ErrNoDecoder struct { Type reflect.Type } @@ -43,6 +51,8 @@ func (end ErrNoDecoder) Error() string { } // ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type. +// +// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0. type ErrNoTypeMapEntry struct { Type bsontype.Type } @@ -52,63 +62,30 @@ func (entme ErrNoTypeMapEntry) Error() string { } // ErrNotInterface is returned when the provided type is not an interface. +// +// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0. var ErrNotInterface = errors.New("The provided type is not an interface") // A RegistryBuilder is used to build a Registry. This type is not goroutine // safe. +// +// Deprecated: Use Registry instead. type RegistryBuilder struct { - typeEncoders map[reflect.Type]ValueEncoder - interfaceEncoders []interfaceValueEncoder - kindEncoders map[reflect.Kind]ValueEncoder - - typeDecoders map[reflect.Type]ValueDecoder - interfaceDecoders []interfaceValueDecoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type -} - -// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main -// typed passed around and Encoders and Decoders are constructed from it. -type Registry struct { - typeEncoders map[reflect.Type]ValueEncoder - typeDecoders map[reflect.Type]ValueDecoder - - interfaceEncoders []interfaceValueEncoder - interfaceDecoders []interfaceValueDecoder - - kindEncoders map[reflect.Kind]ValueEncoder - kindDecoders map[reflect.Kind]ValueDecoder - - typeMap map[bsontype.Type]reflect.Type - - mu sync.RWMutex + registry *Registry } // NewRegistryBuilder creates a new empty RegistryBuilder. +// +// Deprecated: Use NewRegistry instead. func NewRegistryBuilder() *RegistryBuilder { return &RegistryBuilder{ - typeEncoders: make(map[reflect.Type]ValueEncoder), - typeDecoders: make(map[reflect.Type]ValueDecoder), - - interfaceEncoders: make([]interfaceValueEncoder, 0), - interfaceDecoders: make([]interfaceValueDecoder, 0), - - kindEncoders: make(map[reflect.Kind]ValueEncoder), - kindDecoders: make(map[reflect.Kind]ValueDecoder), - - typeMap: make(map[bsontype.Type]reflect.Type), + registry: NewRegistry(), } } -func buildDefaultRegistry() *Registry { - rb := NewRegistryBuilder() - defaultValueEncoders.RegisterDefaultEncoders(rb) - defaultValueDecoders.RegisterDefaultDecoders(rb) - return rb.Build() -} - // RegisterCodec will register the provided ValueCodec for the provided type. +// +// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead.
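With this change the builder survives only as a shim over Registry, so the old chained form and the new direct form do the same work. A sketch of both side by side; Celsius and celsiusEncoder are illustrative names, not driver API:

package main

import (
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// Celsius and celsiusEncoder exist only for this sketch.
type Celsius float64

var celsiusEncoder = bsoncodec.ValueEncoderFunc(
	func(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
		return vw.WriteDouble(val.Float())
	})

func main() {
	tCelsius := reflect.TypeOf(Celsius(0))

	// Old form: still compiles, but every call now forwards to a Registry.
	_ = bsoncodec.NewRegistryBuilder().
		RegisterTypeEncoder(tCelsius, celsiusEncoder).
		Build()

	// New form the deprecation notices point at.
	reg := bson.NewRegistry()
	reg.RegisterTypeEncoder(tCelsius, celsiusEncoder)
}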
func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder { rb.RegisterTypeEncoder(t, codec) rb.RegisterTypeDecoder(t, codec) @@ -120,31 +97,22 @@ func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *Regi // The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered // for a pointer to that type. // -// If the given type is an interface, the encoder will be called when marshalling a type that is that interface. It -// will not be called when marshalling a non-interface type that implements the interface. +// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It +// will not be called when marshaling a non-interface type that implements the interface. +// +// Deprecated: Use Registry.RegisterTypeEncoder instead. func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) return rb } // RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when -// marshalling a type if the type implements t or a pointer to the type implements t. If the provided type is not +// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not // an interface (i.e. t.Kind() != reflect.Interface), this method will panic. +// +// Deprecated: Use Registry.RegisterInterfaceEncoder instead. func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookEncoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, encoder := range rb.interfaceEncoders { - if encoder.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + rb.registry.RegisterInterfaceEncoder(t, enc) return rb } @@ -153,97 +121,78 @@ func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) // The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered // for a pointer to that type. // -// If the given type is an interface, the decoder will be called when unmarshalling into a type that is that interface. -// It will not be called when unmarshalling into a non-interface type that implements the interface. +// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface. +// It will not be called when unmarshaling into a non-interface type that implements the interface. +// +// Deprecated: Use Registry.RegisterTypeDecoder instead. func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } // RegisterHookDecoder will register an decoder for the provided interface type t. This decoder will be called when -// unmarshalling into a type if the type implements t or a pointer to the type implements t. If the provided type is not +// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not // an interface (i.e. t.Kind() != reflect.Interface), this method will panic. 
+// +// Deprecated: Use Registry.RegisterInterfaceDecoder instead. func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t.Kind() != reflect.Interface { - panicStr := fmt.Sprintf("RegisterHookDecoder expects a type with kind reflect.Interface, "+ - "got type %s with kind %s", t, t.Kind()) - panic(panicStr) - } - - for idx, decoder := range rb.interfaceDecoders { - if decoder.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + rb.registry.RegisterInterfaceDecoder(t, dec) return rb } // RegisterEncoder registers the provided type and encoder pair. // -// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead. +// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead. func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { if t == tEmpty { - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) return rb } switch t.Kind() { case reflect.Interface: - for idx, ir := range rb.interfaceEncoders { - if ir.i == t { - rb.interfaceEncoders[idx].ve = enc - return rb - } - } - - rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc}) + rb.registry.RegisterInterfaceEncoder(t, enc) default: - rb.typeEncoders[t] = enc + rb.registry.RegisterTypeEncoder(t, enc) } return rb } // RegisterDecoder registers the provided type and decoder pair. // -// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead. +// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead. func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { if t == nil { - rb.typeDecoders[nil] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } if t == tEmpty { - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) return rb } switch t.Kind() { case reflect.Interface: - for idx, ir := range rb.interfaceDecoders { - if ir.i == t { - rb.interfaceDecoders[idx].vd = dec - return rb - } - } - - rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec}) + rb.registry.RegisterInterfaceDecoder(t, dec) default: - rb.typeDecoders[t] = dec + rb.registry.RegisterTypeDecoder(t, dec) } return rb } -// RegisterDefaultEncoder will registr the provided ValueEncoder to the provided +// RegisterDefaultEncoder will register the provided ValueEncoder to the provided // kind. +// +// Deprecated: Use Registry.RegisterKindEncoder instead. func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { - rb.kindEncoders[kind] = enc + rb.registry.RegisterKindEncoder(kind, enc) return rb } // RegisterDefaultDecoder will register the provided ValueDecoder to the // provided kind. +// +// Deprecated: Use Registry.RegisterKindDecoder instead. func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { - rb.kindDecoders[kind] = dec + rb.registry.RegisterKindDecoder(kind, dec) return rb } @@ -256,120 +205,233 @@ func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDe // to decode to bson.Raw, use the following code: // // rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +// +// Deprecated: Use Registry.RegisterTypeMapEntry instead. 
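Moving the builder's RegisterTypeMapEntry example onto Registry directly; bson.M as the target type is this sketch's choice (the driver docs use bson.Raw), assuming mongo-driver v1.12+:

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	reg := bson.NewRegistry()
	// Same effect as the rb.RegisterTypeMapEntry example above, without the
	// builder: embedded documents now decode into bson.M behind interface{}.
	reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.M{}))

	raw, err := bson.Marshal(bson.D{{Key: "inner", Value: bson.D{{Key: "a", Value: 1}}}})
	if err != nil {
		panic(err)
	}

	var out map[string]interface{}
	if err := bson.UnmarshalWithRegistry(reg, raw, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", out["inner"]) // primitive.M
}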
func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { - rb.typeMap[bt] = rt + rb.registry.RegisterTypeMapEntry(bt, rt) return rb } // Build creates a Registry from the current state of this RegistryBuilder. +// +// Deprecated: Use NewRegistry instead. func (rb *RegistryBuilder) Build() *Registry { - registry := new(Registry) - - registry.typeEncoders = make(map[reflect.Type]ValueEncoder) - for t, enc := range rb.typeEncoders { - registry.typeEncoders[t] = enc + r := &Registry{ + interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...), + interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...), + typeEncoders: rb.registry.typeEncoders.Clone(), + typeDecoders: rb.registry.typeDecoders.Clone(), + kindEncoders: rb.registry.kindEncoders.Clone(), + kindDecoders: rb.registry.kindDecoders.Clone(), } + rb.registry.typeMap.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + r.typeMap.Store(k, v) + } + return true + }) + return r +} - registry.typeDecoders = make(map[reflect.Type]ValueDecoder) - for t, dec := range rb.typeDecoders { - registry.typeDecoders[t] = dec +// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main +// type passed around and Encoders and Decoders are constructed from it. +type Registry struct { + interfaceEncoders []interfaceValueEncoder + interfaceDecoders []interfaceValueDecoder + typeEncoders *typeEncoderCache + typeDecoders *typeDecoderCache + kindEncoders *kindEncoderCache + kindDecoders *kindDecoderCache + typeMap sync.Map // map[bsontype.Type]reflect.Type +} + +// NewRegistry creates a new empty Registry. +func NewRegistry() *Registry { + return &Registry{ + typeEncoders: new(typeEncoderCache), + typeDecoders: new(typeDecoderCache), + kindEncoders: new(kindEncoderCache), + kindDecoders: new(kindDecoderCache), } +} + +// RegisterTypeEncoder registers the provided ValueEncoder for the provided type. +// +// The type will be used as provided, so an encoder can be registered for a type and a different +// encoder can be registered for a pointer to that type. +// +// If the given type is an interface, the encoder will be called when marshaling a type that is +// that interface. It will not be called when marshaling a non-interface type that implements the +// interface. To get the latter behavior, call RegisterHookEncoder instead. +// +// RegisterTypeEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) { + r.typeEncoders.Store(valueType, enc) +} + +// RegisterTypeDecoder registers the provided ValueDecoder for the provided type. +// +// The type will be used as provided, so a decoder can be registered for a type and a different +// decoder can be registered for a pointer to that type. +// +// If the given type is an interface, the decoder will be called when unmarshaling into a type that +// is that interface. It will not be called when unmarshaling into a non-interface type that +// implements the interface. To get the latter behavior, call RegisterHookDecoder instead. +// +// RegisterTypeDecoder should not be called concurrently with any other Registry method.
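The repeated "should not be called concurrently with any other Registry method" contract amounts to a register-then-share pattern. A sketch under that assumption:

package main

import (
	"reflect"
	"sync"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	// Finish every Register* call before the registry is shared...
	reg := bson.NewRegistry()
	reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))

	// ...after which concurrent lookups are allowed.
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if _, err := reg.LookupEncoder(reflect.TypeOf("")); err != nil {
				panic(err)
			}
		}()
	}
	wg.Wait()
}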
+func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) { + r.typeDecoders.Store(valueType, dec) +} - registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders)) - copy(registry.interfaceEncoders, rb.interfaceEncoders) +// RegisterKindEncoder registers the provided ValueEncoder for the provided kind. +// +// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For +// example, consider the type MyInt defined as +// +// type MyInt int32 +// +// To define an encoder for MyInt and int32, use RegisterKindEncoder like +// +// reg.RegisterKindEncoder(reflect.Int32, myEncoder) +// +// RegisterKindEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) { + r.kindEncoders.Store(kind, enc) +} - registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders)) - copy(registry.interfaceDecoders, rb.interfaceDecoders) +// RegisterKindDecoder registers the provided ValueDecoder for the provided kind. +// +// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For +// example, consider the type MyInt defined as +// +// type MyInt int32 +// +// To define a decoder for MyInt and int32, use RegisterKindDecoder like +// +// reg.RegisterKindDecoder(reflect.Int32, myDecoder) +// +// RegisterKindDecoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) { + r.kindDecoders.Store(kind, dec) +} - registry.kindEncoders = make(map[reflect.Kind]ValueEncoder) - for kind, enc := range rb.kindEncoders { - registry.kindEncoders[kind] = enc +// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will +// be called when marshaling a type if the type implements iface or a pointer to the type +// implements iface. If the provided type is not an interface +// (i.e. iface.Kind() != reflect.Interface), this method will panic. +// +// RegisterInterfaceEncoder should not be called concurrently with any other Registry method. +func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) { + if iface.Kind() != reflect.Interface { + panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", iface, iface.Kind()) + panic(panicStr) } - registry.kindDecoders = make(map[reflect.Kind]ValueDecoder) - for kind, dec := range rb.kindDecoders { - registry.kindDecoders[kind] = dec + for idx, encoder := range r.interfaceEncoders { + if encoder.i == iface { + r.interfaceEncoders[idx].ve = enc + return + } + } + + r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc}) +} + +// RegisterInterfaceDecoder registers a decoder for the provided interface type iface. This decoder will +// be called when unmarshaling into a type if the type implements iface or a pointer to the type +// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface), +// this method will panic. +// +// RegisterInterfaceDecoder should not be called concurrently with any other Registry method.
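The MyInt example from the comments above, filled in with a working kind encoder; the encoder body itself is illustrative, not driver code:

package main

import (
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

type MyInt int32

func main() {
	// A kind encoder matches every type whose Kind is reflect.Int32, so this
	// one function covers MyInt as well as plain int32.
	enc := bsoncodec.ValueEncoderFunc(
		func(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
			return vw.WriteInt32(int32(val.Int()))
		})

	reg := bson.NewRegistry()
	reg.RegisterKindEncoder(reflect.Int32, enc)

	if _, err := bson.MarshalWithRegistry(reg, bson.D{{Key: "n", Value: MyInt(7)}}); err != nil {
		panic(err)
	}
}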
+func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) { + if iface.Kind() != reflect.Interface { + panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+ + "got type %s with kind %s", iface, iface.Kind()) + panic(panicStr) } - registry.typeMap = make(map[bsontype.Type]reflect.Type) - for bt, rt := range rb.typeMap { - registry.typeMap[bt] = rt + for idx, decoder := range r.interfaceDecoders { + if decoder.i == iface { + r.interfaceDecoders[idx].vd = dec + return + } } - return registry + r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec}) } -// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows: +// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this +// mapping is decoding situations where an empty interface is used and a default type needs to be +// created and decoded into. // -// 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using -// RegisterTypeEncoder for the interface will be selected. +// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON +// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents +// to decode to bson.Raw, use the following code: // -// 2. An encoder registered using RegisterHookEncoder for an interface implemented by the type or by a pointer to the -// type. +// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { + r.typeMap.Store(bt, rt) +} + +// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup +// order: +// +// 1. An encoder registered for the exact type. If the given type is an interface, an encoder +// registered using RegisterTypeEncoder for that interface will be selected. // -// 3. An encoder registered for the reflect.Kind of the value. +// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type +// or by a pointer to the type. // -// If no encoder is found, an error of type ErrNoEncoder is returned. -func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) { - encodererr := ErrNoEncoder{Type: t} - r.mu.RLock() - enc, found := r.lookupTypeEncoder(t) - r.mu.RUnlock() +// 3. An encoder registered using RegisterKindEncoder for the kind of value. +// +// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for +// concurrent use by multiple goroutines after all codecs and encoders are registered. 
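The panic clause in RegisterInterfaceEncoder/RegisterInterfaceDecoder makes the reflect idiom for naming an interface type worth spelling out. A sketch using fmt.Stringer as the hook:

package main

import (
	"fmt"
	"reflect"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	// RegisterInterfaceEncoder panics unless the type's Kind is Interface;
	// this is the usual idiom for obtaining an interface type.
	tStringer := reflect.TypeOf((*fmt.Stringer)(nil)).Elem()

	enc := bsoncodec.ValueEncoderFunc(
		func(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
			return vw.WriteString(val.Interface().(fmt.Stringer).String())
		})

	reg := bson.NewRegistry()
	reg.RegisterInterfaceEncoder(tStringer, enc)

	// time.Duration has no exact type encoder here, but it implements
	// fmt.Stringer, so this hook wins over the int64 kind encoder.
	if _, err := reg.LookupEncoder(reflect.TypeOf(time.Minute)); err != nil {
		panic(err)
	}
}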
+func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { + if valueType == nil { + return nil, ErrNoEncoder{Type: valueType} + } + enc, found := r.lookupTypeEncoder(valueType) if found { if enc == nil { - return nil, ErrNoEncoder{Type: t} + return nil, ErrNoEncoder{Type: valueType} } return enc, nil } - enc, found = r.lookupInterfaceEncoder(t, true) + enc, found = r.lookupInterfaceEncoder(valueType, true) if found { - r.mu.Lock() - r.typeEncoders[t] = enc - r.mu.Unlock() - return enc, nil - } - - if t == nil { - r.mu.Lock() - r.typeEncoders[t] = nil - r.mu.Unlock() - return nil, encodererr + return r.typeEncoders.LoadOrStore(valueType, enc), nil } - enc, found = r.kindEncoders[t.Kind()] - if !found { - r.mu.Lock() - r.typeEncoders[t] = nil - r.mu.Unlock() - return nil, encodererr + if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { + return r.storeTypeEncoder(valueType, v), nil } + return nil, ErrNoEncoder{Type: valueType} +} - r.mu.Lock() - r.typeEncoders[t] = enc - r.mu.Unlock() - return enc, nil +func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { + return r.typeEncoders.LoadOrStore(rt, enc) } -func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) { - enc, found := r.typeEncoders[t] - return enc, found +func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { + return r.typeEncoders.Load(rt) } -func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) { - if t == nil { +func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { + if valueType == nil { return nil, false } for _, ienc := range r.interfaceEncoders { - if t.Implements(ienc.i) { + if valueType.Implements(ienc.i) { return ienc.ve, true } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceEncoders - defaultEnc, found := r.lookupInterfaceEncoder(t, false) + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceEncoders + defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) if !found { - defaultEnc = r.kindEncoders[t.Kind()] + defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) } return newCondAddrEncoder(ienc.ve, defaultEnc), true } @@ -377,70 +439,61 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value return nil, false } -// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows: +// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup +// order: // -// 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using -// RegisterTypeDecoder for the interface will be selected. +// 1. A decoder registered for the exact type. If the given type is an interface, a decoder +// registered using RegisterTypeDecoder for that interface will be selected. // -// 2. A decoder registered using RegisterHookDecoder for an interface implemented by the type or by a pointer to the -// type. +// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by +// a pointer to the type. // -// 3. 
A decoder registered for the reflect.Kind of the value. +// 3. A decoder registered using RegisterKindDecoder for the kind of value. // -// If no decoder is found, an error of type ErrNoDecoder is returned. -func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) { - if t == nil { +// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for +// concurrent use by multiple goroutines after all codecs and decoders are registered. +func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { + if valueType == nil { return nil, ErrNilType } - decodererr := ErrNoDecoder{Type: t} - r.mu.RLock() - dec, found := r.lookupTypeDecoder(t) - r.mu.RUnlock() + dec, found := r.lookupTypeDecoder(valueType) if found { if dec == nil { - return nil, ErrNoDecoder{Type: t} + return nil, ErrNoDecoder{Type: valueType} } return dec, nil } - dec, found = r.lookupInterfaceDecoder(t, true) + dec, found = r.lookupInterfaceDecoder(valueType, true) if found { - r.mu.Lock() - r.typeDecoders[t] = dec - r.mu.Unlock() - return dec, nil + return r.storeTypeDecoder(valueType, dec), nil } - dec, found = r.kindDecoders[t.Kind()] - if !found { - r.mu.Lock() - r.typeDecoders[t] = nil - r.mu.Unlock() - return nil, decodererr + if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { + return r.storeTypeDecoder(valueType, v), nil } + return nil, ErrNoDecoder{Type: valueType} +} - r.mu.Lock() - r.typeDecoders[t] = dec - r.mu.Unlock() - return dec, nil +func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { + return r.typeDecoders.Load(valueType) } -func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) { - dec, found := r.typeDecoders[t] - return dec, found +func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { + return r.typeDecoders.LoadOrStore(typ, dec) } -func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) { +func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { for _, idec := range r.interfaceDecoders { - if t.Implements(idec.i) { + if valueType.Implements(idec.i) { return idec.vd, true } - if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) { - // if *t implements an interface, this will catch if t implements an interface further ahead - // in interfaceDecoders - defaultDec, found := r.lookupInterfaceDecoder(t, false) + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceDecoders + defaultDec, found := r.lookupInterfaceDecoder(valueType, false) if !found { - defaultDec = r.kindDecoders[t.Kind()] + defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) } return newCondAddrDecoder(idec.vd, defaultDec), true } @@ -450,12 +503,14 @@ func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (Value // LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON // type. If no type is found, ErrNoTypeMapEntry is returned. +// +// LookupTypeMapEntry should not be called concurrently with any other Registry method. 
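A sketch of the type-map lookup described above, using the documented default entry for embedded documents; the miss branch is illustrative:

package main

import (
	"errors"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	reg := bson.NewRegistry()

	rt, err := reg.LookupTypeMapEntry(bsontype.EmbeddedDocument)
	if err != nil {
		// A miss surfaces as ErrNoTypeMapEntry.
		var noEntry bsoncodec.ErrNoTypeMapEntry
		if errors.As(err, &noEntry) {
			fmt.Println("no Go type mapped for", noEntry.Type)
			return
		}
		panic(err)
	}
	fmt.Println(rt) // primitive.D, the documented default for documents
}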
func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - t, ok := r.typeMap[bt] - if !ok || t == nil { + v, ok := r.typeMap.Load(bt) + if v == nil || !ok { return nil, ErrNoTypeMapEntry{Type: bt} } - return t, nil + return v.(reflect.Type), nil } type interfaceValueEncoder struct { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go index 3c1b6b860..14c9fd256 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -7,6 +7,7 @@ package bsoncodec import ( + "errors" "fmt" "reflect" @@ -19,13 +20,35 @@ import ( var defaultSliceCodec = NewSliceCodec() // SliceCodec is the Codec used for slice values. +// +// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To +// configure the slice encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go slices as empty +// BSON arrays, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in SliceCodec for the corresponding +// settings. type SliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of + // BSON null. + // + // Deprecated: Use bson.Encoder.NilSliceAsEmpty instead. EncodeNilAsEmpty bool } -var _ ValueCodec = &MapCodec{} - // NewSliceCodec returns a SliceCodec with options opts. +// +// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See +// [SliceCodec] for more details. func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) @@ -42,21 +65,19 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} } - if val.IsNil() && !sc.EncodeNilAsEmpty { + if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty { return vw.WriteNull() } // If we have a []byte we want to treat it as a binary instead of as an array. if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } + byteSlice := make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(byteSlice), val) return vw.WriteBinary(byteSlice) } // If we have a []primitive.E we want to treat it as a document instead of as an array.
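SliceCodec.EncodeNilAsEmpty likewise maps onto a per-Encoder switch, while the []byte fast path above still short-circuits to a BSON binary rather than an array. A sketch, assuming mongo-driver v1.12+:

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	// Per-Encoder replacement for SliceCodec.EncodeNilAsEmpty: the nil IDs
	// slice below marshals as [] instead of BSON null.
	enc.NilSliceAsEmpty()

	// Raw still takes the []byte fast path and becomes a BSON binary.
	doc := struct {
		IDs []int
		Raw []byte
	}{Raw: []byte{0x01}}
	if err := enc.Encode(doc); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf.Bytes())
}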
- if val.Type().ConvertibleTo(tD) { + if val.Type() == tD || val.Type().ConvertibleTo(tD) { d := val.Convert(tD).Interface().(primitive.D) dw, err := vw.WriteDocument() @@ -87,7 +108,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -96,7 +117,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -145,11 +166,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r if val.IsNil() { val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) } - val.SetLen(0) - for _, elem := range data { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } + val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) return nil case bsontype.String: if sliceType := val.Type().Elem(); sliceType != tByte { @@ -164,11 +182,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r if val.IsNil() { val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) } - val.SetLen(0) - for _, elem := range byteStr { - val.Set(reflect.Append(val, reflect.ValueOf(elem))) - } + val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) return nil default: return fmt.Errorf("cannot decode %v into a slice", vrType) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go index 5332b7c3b..a8f885a85 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -15,26 +15,46 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// StringCodec is the Codec used for struct values. +// StringCodec is the Codec used for string values. +// +// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To +// override the default string encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for strings. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.String, myStringEncoder) +// reg.RegisterKindDecoder(reflect.String, myStringDecoder) type StringCodec struct { + // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. + // If false, a string made from the raw object ID bytes will be used. Defaults to true. + // + // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. DecodeObjectIDAsHex bool } var ( defaultStringCodec = NewStringCodec() - _ ValueCodec = defaultStringCodec + // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. _ typeDecoder = defaultStringCodec ) // NewStringCodec returns a StringCodec with options opts. +// +// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See +// [StringCodec] for more details. 
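The StringCodec deprecation above points at a kind-level replacement. A sketch with a toy lower-casing decoder; the normalization is illustrative, not driver behavior, and the decoder only handles BSON strings:

package main

import (
	"fmt"
	"reflect"
	"strings"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	// A toy kind decoder that lower-cases every BSON string on the way in.
	dec := bsoncodec.ValueDecoderFunc(
		func(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
			s, err := vr.ReadString()
			if err != nil {
				return err
			}
			val.SetString(strings.ToLower(s))
			return nil
		})

	reg := bson.NewRegistry()
	reg.RegisterKindDecoder(reflect.String, dec)

	raw, err := bson.Marshal(bson.D{{Key: "name", Value: "PODMAN"}})
	if err != nil {
		panic(err)
	}
	var out struct{ Name string }
	if err := bson.UnmarshalWithRegistry(reg, raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // podman
}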
func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { stringOpt := bsonoptions.MergeStringCodecOptions(opts...) return &StringCodec{*stringOpt.DecodeObjectIDAsHex} } // EncodeValue is the ValueEncoder for string types. -func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if val.Kind() != reflect.String { return ValueEncoderError{ Name: "StringEncodeValue", @@ -46,7 +66,7 @@ func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, va return vw.WriteString(val.String()) } -func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { +func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { if t.Kind() != reflect.String { return emptyValue, ValueDecoderError{ Name: "StringDecodeValue", @@ -71,6 +91,7 @@ func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t ref if sc.DecodeObjectIDAsHex { str = oid.Hex() } else { + // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. byteArray := [12]byte(oid) str = string(byteArray[:]) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index be3f2081e..f8d9690c1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -59,14 +59,58 @@ type Zeroer interface { } // StructCodec is the Codec used for struct values. +// +// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0. +// To configure the struct encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to omit zero-value structs when +// using the "omitempty" struct tag, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// OmitZeroStruct: true, +// }) +// +// See the deprecation notice for each field in StructCodec for the corresponding +// settings. type StructCodec struct { - cache map[reflect.Type]*structDescription - l sync.RWMutex - parser StructTagParser - DecodeZeroStruct bool - DecodeDeepZeroInline bool - EncodeOmitDefaultStruct bool - AllowUnexportedFields bool + cache sync.Map // map[reflect.Type]*structDescription + parser StructTagParser + + // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead. + DecodeZeroStruct bool + + // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the + // destination value passed to Decode before unmarshaling BSON documents into them. + // + // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. + DecodeDeepZeroInline bool + + // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g. 
+ // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag + // option is set. + // + // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead. + EncodeOmitDefaultStruct bool + + // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. + // + // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be + // supported in Go Driver 2.0. + AllowUnexportedFields bool + + // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is + // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The + // default value is true. + // + // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or + // options.BSONOptions.ErrorOnInlineDuplicates instead. OverwriteDuplicatedInlinedFields bool } @@ -74,6 +118,9 @@ var _ ValueEncoder = &StructCodec{} var _ ValueDecoder = &StructCodec{} // NewStructCodec returns a StructCodec that uses p for struct tag parsing. +// +// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See +// [StructCodec] for more details. func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { if p == nil { return nil, errors.New("a StructTagParser must be provided to NewStructCodec") @@ -82,7 +129,6 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) structOpt := bsonoptions.MergeStructCodecOptions(opts...) codec := &StructCodec{ - cache: make(map[reflect.Type]*structDescription), parser: p, } @@ -106,12 +152,12 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) } // EncodeValue handles encoding generic struct types. -func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Kind() != reflect.Struct { return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} } - sd, err := sc.describeStruct(r.Registry, val.Type()) + sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) if err != nil { return err } @@ -131,13 +177,13 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r } } - desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(r, desc.encoder, rv) + desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) - if err != nil && err != errInvalidValue { + if err != nil && !errors.Is(err, errInvalidValue) { return err } - if err == errInvalidValue { + if errors.Is(err, errInvalidValue) { if desc.omitEmpty { continue } @@ -158,17 +204,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r encoder := desc.encoder - var isZero bool - rvInterface := rv.Interface() + var empty bool if cz, ok := encoder.(CodecZeroer); ok { - isZero = cz.IsTypeZero(rvInterface) + empty = cz.IsTypeZero(rv.Interface()) } else if rv.Kind() == reflect.Interface { - // sc.isZero will not treat an interface rv as an interface, so we need to check for the zero interface separately. - isZero = rv.IsNil() + // isEmpty will not treat an interface rv as an interface, so we need to check for the + // nil interface separately. 
+ empty = rv.IsNil() } else { - isZero = sc.isZero(rvInterface) + empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) } - if desc.omitEmpty && isZero { + if desc.omitEmpty && empty { continue } @@ -177,7 +223,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r return err } - ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize} + ectx := EncodeContext{ + Registry: ec.Registry, + MinSize: desc.minSize || ec.MinSize, + errorOnInlineDuplicates: ec.errorOnInlineDuplicates, + stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, + nilMapAsEmpty: ec.nilMapAsEmpty, + nilSliceAsEmpty: ec.nilSliceAsEmpty, + nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, + omitZeroStruct: ec.omitZeroStruct, + useJSONStructTags: ec.useJSONStructTags, + } err = encoder.EncodeValue(ectx, vw2, rv) if err != nil { return err @@ -191,15 +247,15 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r return exists } - return defaultMapCodec.mapEncodeValue(r, dw, rv, collisionFn) + return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) } return dw.WriteDocumentEnd() } func newDecodeError(key string, original error) error { - de, ok := original.(*DecodeError) - if !ok { + var de *DecodeError + if !errors.As(original, &de) { return &DecodeError{ keys: []string{key}, wrapped: original, @@ -213,7 +269,7 @@ func newDecodeError(key string, original error) error { // DecodeValue implements the Codec interface. // By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. // For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. -func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Kind() != reflect.Struct { return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} } @@ -238,12 +294,12 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) } - sd, err := sc.describeStruct(r.Registry, val.Type()) + sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) if err != nil { return err } - if sc.DecodeZeroStruct { + if sc.DecodeZeroStruct || dc.zeroStructs { val.Set(reflect.Zero(val.Type())) } if sc.DecodeDeepZeroInline && sd.inline { @@ -254,7 +310,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r var inlineMap reflect.Value if sd.inlineMap >= 0 { inlineMap = val.Field(sd.inlineMap) - decoder, err = r.LookupDecoder(inlineMap.Type().Elem()) + decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) if err != nil { return err } @@ -267,7 +323,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r for { name, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -298,8 +354,8 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r } elem := reflect.New(inlineMap.Type().Elem()).Elem() - r.Ancestor = inlineMap.Type() - err = decoder.DecodeValue(r, vr, elem) + dc.Ancestor = inlineMap.Type() + err = decoder.DecodeValue(dc, vr, elem) if err != nil { return err } @@ -326,7 +382,17 @@ func (sc 
*StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r } field = field.Addr() - dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate || r.Truncate} + dctx := DecodeContext{ + Registry: dc.Registry, + Truncate: fd.truncate || dc.Truncate, + defaultDocumentType: dc.defaultDocumentType, + binaryAsSlice: dc.binaryAsSlice, + useJSONStructTags: dc.useJSONStructTags, + useLocalTimeZone: dc.useLocalTimeZone, + zeroMaps: dc.zeroMaps, + zeroStructs: dc.zeroStructs, + } + if fd.decoder == nil { return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) } @@ -340,51 +406,35 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r return nil } -func (sc *StructCodec) isZero(i interface{}) bool { - v := reflect.ValueOf(i) - - // check the value validity - if !v.IsValid() { - return true - } - - if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) { - return z.IsZero() +func isEmpty(v reflect.Value, omitZeroStruct bool) bool { + kind := v.Kind() + if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { + return v.Interface().(Zeroer).IsZero() } - - switch v.Kind() { + switch kind { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() case reflect.Struct: - if sc.EncodeOmitDefaultStruct { - vt := v.Type() - if vt == tTime { - return v.Interface().(time.Time).IsZero() + if !omitZeroStruct { + return false + } + vt := v.Type() + if vt == tTime { + return v.Interface().(time.Time).IsZero() + } + numField := vt.NumField() + for i := 0; i < numField; i++ { + ff := vt.Field(i) + if ff.PkgPath != "" && !ff.Anonymous { + continue // Private field } - for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { - continue // Private field - } - fld := v.Field(i) - if !sc.isZero(fld.Interface()) { - return false - } + if !isEmpty(v.Field(i), omitZeroStruct) { + return false } - return true } + return true } - - return false + return !v.IsValid() || v.IsZero() } type structDescription struct { @@ -435,16 +485,35 @@ func (bi byIndex) Less(i, j int) bool { return len(bi[i].inline) < len(bi[j].inline) } -func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) { +func (sc *StructCodec) describeStruct( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { // We need to analyze the struct, including getting the tags, collecting // information about inlining, and create a map of the field name to the field. - sc.l.RLock() - ds, exists := sc.cache[t] - sc.l.RUnlock() - if exists { - return ds, nil + if v, ok := sc.cache.Load(t); ok { + return v.(*structDescription), nil } + // TODO(charlie): Only describe the struct once when called + // concurrently with the same type. 
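A standalone sketch of the sync.Map memoization pattern the new struct-description cache relies on; the names here are illustrative, not driver code. Load is lock-free, and LoadOrStore resolves the race the TODO above describes by keeping exactly one stored description per type:

	package main

	import (
		"fmt"
		"reflect"
		"sync"
	)

	type description struct{ name string }

	// typeCache memoizes one *description per reflect.Type. Lookups take no
	// lock; on a miss the value is computed outside any lock, and LoadOrStore
	// keeps whichever result won the race so all callers share one instance.
	var typeCache sync.Map // map[reflect.Type]*description

	func describe(t reflect.Type) *description {
		if v, ok := typeCache.Load(t); ok {
			return v.(*description)
		}
		ds := &description{name: t.String()} // may run twice under contention
		if v, loaded := typeCache.LoadOrStore(t, ds); loaded {
			ds = v.(*description) // another goroutine stored first; reuse its value
		}
		return ds
	}

	func main() {
		t := reflect.TypeOf(struct{ A int }{})
		fmt.Println(describe(t) == describe(t)) // true: both calls hit the cache
	}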
+ ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates) + if err != nil { + return nil, err + } + if v, loaded := sc.cache.LoadOrStore(t, ds); loaded { + ds = v.(*structDescription) + } + return ds, nil +} +func (sc *StructCodec) describeStructSlow( + r *Registry, + t reflect.Type, + useJSONStructTags bool, + errorOnDuplicates bool, +) (*structDescription, error) { numFields := t.NumField() sd := &structDescription{ fm: make(map[string]fieldDescription, numFields), @@ -477,7 +546,14 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr decoder: decoder, } - stags, err := sc.parser.ParseStructTags(sf) + var stags StructTags + // If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser + // instead of the parser defined on the codec. + if useJSONStructTags { + stags, err = JSONFallbackStructTagParser.ParseStructTags(sf) + } else { + stags, err = sc.parser.ParseStructTags(sf) + } if err != nil { return nil, err } @@ -507,7 +583,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr } fallthrough case reflect.Struct: - inlinesf, err := sc.describeStruct(r, sfType) + inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates) if err != nil { return nil, err } @@ -559,7 +635,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr continue } dominant, ok := dominantField(fields[i : i+advance]) - if !ok || !sc.OverwriteDuplicatedInlinedFields { + if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates { return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) } sd.fl = append(sd.fl, dominant) @@ -568,10 +644,6 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr sort.Sort(byIndex(sd.fl)) - sc.l.Lock() - sc.cache[t] = sd - sc.l.Unlock() - return sd, nil } @@ -629,21 +701,21 @@ func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { // DeepZero returns recursive zero object func deepZero(st reflect.Type) (result reflect.Value) { - result = reflect.Indirect(reflect.New(st)) - - if result.Kind() == reflect.Struct { - for i := 0; i < result.NumField(); i++ { - if f := result.Field(i); f.Kind() == reflect.Ptr { - if f.CanInterface() { - if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem()))) - } + if st.Kind() == reflect.Struct { + numField := st.NumField() + for i := 0; i < numField; i++ { + if result == emptyValue { + result = reflect.Indirect(reflect.New(st)) + } + f := result.Field(i) + if f.CanInterface() { + if f.Type().Kind() == reflect.Struct { + result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem()))) } } } } - - return + return result } // recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go index 62708c5c7..18d85bfb0 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -12,12 +12,16 @@ import ( ) // StructTagParser returns the struct tags for a given struct field. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. 
type StructTagParser interface { ParseStructTags(reflect.StructField) (StructTags, error) } // StructTagParserFunc is an adapter that allows a generic function to be used // as a StructTagParser. +// +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. type StructTagParserFunc func(reflect.StructField) (StructTags, error) // ParseStructTags implements the StructTagParser interface. @@ -50,7 +54,7 @@ func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructT // Skip This struct field should be skipped. This is usually denoted by parsing a "-" // for the name. // -// TODO(skriptble): Add tags for undefined as nil and for null as nil. +// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. type StructTags struct { Name string OmitEmpty bool @@ -85,6 +89,8 @@ type StructTags struct { // A struct tag either consisting entirely of '-' or with a bson key with a // value consisting entirely of '-' will return a StructTags with Skip true and // the remaining fields will be their default values. +// +// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0. var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { key := strings.ToLower(sf.Name) tag, ok := sf.Tag.Lookup("bson") @@ -125,6 +131,9 @@ func parseTags(key string, tag string) (StructTags, error) { // JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser // but will also fallback to parsing the json tag instead on a field where the // bson tag isn't available. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and +// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { key := strings.ToLower(sf.Name) tag, ok := sf.Tag.Lookup("bson") diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go index ec7e30f72..22fb762c4 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -22,18 +22,42 @@ const ( ) // TimeCodec is the Codec used for time.Time values. +// +// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0. +// To configure the time.Time encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to ..., use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// UseLocalTimeZone: true, +// }) +// +// See the deprecation notice for each field in TimeCodec for the corresponding +// settings. type TimeCodec struct { + // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. + // + // Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone + // instead. UseLocalTimeZone bool } var ( defaultTimeCodec = NewTimeCodec() - _ ValueCodec = defaultTimeCodec + // Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. 
map, slice, etc) to set individual values in a collection. _ typeDecoder = defaultTimeCodec ) // NewTimeCodec returns a TimeCodec with options opts. +// +// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See +// [TimeCodec] for more details. func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) @@ -95,7 +119,7 @@ func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refle return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) } - if !tc.UseLocalTimeZone { + if !tc.UseLocalTimeZone && !dc.useLocalTimeZone { timeVal = timeVal.UTC() } return reflect.ValueOf(timeVal), nil @@ -117,7 +141,7 @@ func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val re } // EncodeValue is the ValueEncoderFunc for time.Time. -func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tTime { return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go index 07f4b70e6..6ade17b7d 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go @@ -34,6 +34,7 @@ var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() +var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() var tBinary = reflect.TypeOf(primitive.Binary{}) var tUndefined = reflect.TypeOf(primitive.Undefined{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index 0b21ce999..852547276 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -17,18 +17,43 @@ import ( ) // UIntCodec is the Codec used for uint values. +// +// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To +// configure the uint encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal Go uint values as the +// minimum BSON int size that can represent the value, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// IntMinSize: true, +// }) +// +// See the deprecation notice for each field in UIntCodec for the corresponding +// settings. type UIntCodec struct { + // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the + // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead.
EncodeToMinSize bool } var ( defaultUIntCodec = NewUIntCodec() - _ ValueCodec = defaultUIntCodec + // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. _ typeDecoder = defaultUIntCodec ) // NewUIntCodec returns a UIntCodec with options opts. +// +// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See +// [UIntCodec] for more details. func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go index b1256a4dc..996bd1712 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type ByteSliceCodecOptions struct { EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. } // ByteSliceCodec creates a new *ByteSliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func ByteSliceCodec() *ByteSliceCodecOptions { return &ByteSliceCodecOptions{} } // SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { bs.EncodeNilAsEmpty = &b return bs } // MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { bs := ByteSliceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go index 6caaa000e..f522c7e03 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type EmptyInterfaceCodecOptions struct { DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false.
} // EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions { return &EmptyInterfaceCodecOptions{} } // SetDecodeBinaryAsSlice specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions { e.DecodeBinaryAsSlice = &b return e } // MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { e := EmptyInterfaceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go index 7a6a880b8..a7a7c1d98 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go @@ -7,6 +7,9 @@ package bsonoptions // MapCodecOptions represents all possible options for map encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type MapCodecOptions struct { DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. @@ -19,17 +22,24 @@ type MapCodecOptions struct { } // MapCodec creates a new *MapCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func MapCodec() *MapCodecOptions { return &MapCodecOptions{} } // SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { t.DecodeZerosMap = &b return t } // SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { t.EncodeNilAsEmpty = &b return t @@ -40,12 +50,17 @@ func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { // type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with // fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer // will override TextMarshaler/TextUnmarshaler. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead.
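The deprecation notices in these option files all converge on the same replacement surface: per-Encoder/Decoder methods, or the client-wide options.BSONOptions struct (mongo-driver v1.12+). A hedged sketch of the client-level wiring, with a placeholder URI:

	package main

	import (
		"context"

		"go.mongodb.org/mongo-driver/mongo"
		"go.mongodb.org/mongo-driver/mongo/options"
	)

	func main() {
		// Each field stands in for one of the deprecated codec options above:
		// StringifyMapKeysWithFmt ~ MapCodecOptions.SetEncodeKeysWithStringer,
		// NilMapAsEmpty ~ MapCodecOptions.SetEncodeNilAsEmpty,
		// ZeroMaps ~ MapCodecOptions.SetDecodeZerosMap.
		opt := options.Client().
			ApplyURI("mongodb://localhost:27017"). // placeholder URI
			SetBSONOptions(&options.BSONOptions{
				StringifyMapKeysWithFmt: true,
				NilMapAsEmpty:           true,
				ZeroMaps:                true,
			})
		client, err := mongo.Connect(context.Background(), opt)
		if err != nil {
			panic(err)
		}
		defer func() { _ = client.Disconnect(context.Background()) }()
	}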
func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { t.EncodeKeysWithStringer = &b return t } // MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { s := MapCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go index ef965e4b4..3c1e4f35b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // SliceCodecOptions represents all possible options for slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type SliceCodecOptions struct { EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. } // SliceCodec creates a new *SliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func SliceCodec() *SliceCodecOptions { return &SliceCodecOptions{} } // SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { s.EncodeNilAsEmpty = &b return s } // MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { s := SliceCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go index 65964f420..f8b76f996 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go @@ -9,23 +9,34 @@ package bsonoptions var defaultDecodeOIDAsHex = true // StringCodecOptions represents all possible options for string encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type StringCodecOptions struct { DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. } // StringCodec creates a new *StringCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func StringCodec() *StringCodecOptions { return &StringCodecOptions{} } // SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made // from the raw object ID bytes will be used. Defaults to true. 
+// +// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { t.DecodeObjectIDAsHex = &b return t } // MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { s := &StringCodecOptions{&defaultDecodeOIDAsHex} for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go index 78d1dd866..1cbfa32e8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go @@ -9,6 +9,9 @@ package bsonoptions var defaultOverwriteDuplicatedInlinedFields = true // StructCodecOptions represents all possible options for struct encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type StructCodecOptions struct { DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false. @@ -18,17 +21,24 @@ type StructCodecOptions struct { } // StructCodec creates a new *StructCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func StructCodec() *StructCodecOptions { return &StructCodecOptions{} } // SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions { t.DecodeZeroStruct = &b return t } // SetDecodeDeepZeroInline specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false. +// +// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions { t.DecodeDeepZeroInline = &b return t @@ -36,6 +46,8 @@ func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions // SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all // its values set to their default value. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions { t.EncodeOmitDefaultStruct = &b return t @@ -45,18 +57,26 @@ func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOpti // same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when // encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if // there are duplicate keys after the struct is inlined. Defaults to true.
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions { t.OverwriteDuplicatedInlinedFields = &b return t } // SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. +// +// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be +// supported in Go Driver 2.0. func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { t.AllowUnexportedFields = &b return t } // MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { s := &StructCodecOptions{ OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go index 13496d121..3f38433d2 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // TimeCodecOptions represents all possible options for time.Time encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type TimeCodecOptions struct { UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. } // TimeCodec creates a new *TimeCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func TimeCodec() *TimeCodecOptions { return &TimeCodecOptions{} } // SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { t.UseLocalTimeZone = &b return t } // MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { t := TimeCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go index e08b7f192..5091e4d96 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go @@ -7,22 +7,33 @@ package bsonoptions // UIntCodecOptions represents all possible options for uint encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. type UIntCodecOptions struct { EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. 
} // UIntCodec creates a new *UIntCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. func UIntCodec() *UIntCodecOptions { return &UIntCodecOptions{} } // SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead. func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { u.EncodeToMinSize = &b return u } // MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { u := UIntCodec() for _, opt := range opts { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go index 5cdf6460b..1e25570b8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -7,6 +7,7 @@ package bsonrw import ( + "errors" "fmt" "io" @@ -17,20 +18,32 @@ import ( // Copier is a type that allows copying between ValueReaders, ValueWriters, and // []byte values. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. type Copier struct{} // NewCopier creates a new copier with the given registry. If a nil registry is provided // a default registry is used. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func NewCopier() Copier { return Copier{} } // CopyDocument handles copying a document from src to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func CopyDocument(dst ValueWriter, src ValueReader) error { return Copier{}.CopyDocument(dst, src) } // CopyDocument handles copying one document from the src to the dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { dr, err := src.ReadDocument() if err != nil { @@ -47,6 +60,9 @@ func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { // CopyArrayFromBytes copies the values from a BSON array represented as a // []byte to a ValueWriter. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { aw, err := dst.WriteArray() if err != nil { @@ -63,6 +79,9 @@ func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { // CopyDocumentFromBytes copies the values from a BSON document represented as a // []byte to a ValueWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. 
func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { dw, err := dst.WriteDocument() if err != nil { @@ -81,6 +100,9 @@ type writeElementFn func(key string) (ValueWriter, error) // CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an // ArrayWriter. +// +// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go +// Driver 2.0. func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { wef := func(_ string) (ValueWriter, error) { return dst.WriteArrayElement() @@ -91,6 +113,9 @@ func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { // CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a // DocumentWriter. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { wef := func(key string) (ValueWriter, error) { return dst.WriteDocumentElement(key) @@ -100,7 +125,7 @@ func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error } func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create errors types here. Anything thats a tag should be a property. + // TODO(skriptble): Create errors types here. Anything that is a tag should be a property. length, rem, ok := bsoncore.ReadLength(src) if !ok { return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) @@ -150,12 +175,18 @@ func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { // CopyDocumentToBytes copies an entire document from the ValueReader and // returns it as bytes. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { return c.AppendDocumentBytes(nil, src) } // AppendDocumentBytes functions the same as CopyDocumentToBytes, but will // append the result to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { if br, ok := src.(BytesReader); ok { _, dst, err := br.ReadValueBytes(dst) @@ -163,7 +194,7 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) vw.reset(dst) @@ -173,6 +204,9 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) } // AppendArrayBytes copies an array from the ValueReader to dst. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { if br, ok := src.(BytesReader); ok { _, dst, err := br.ReadValueBytes(dst) @@ -180,7 +214,7 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) vw.reset(dst) @@ -190,6 +224,8 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { } // CopyValueFromBytes will write the value represented by t and src to dst.
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead. func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { if wvb, ok := dst.(BytesWriter); ok { return wvb.WriteValueBytes(t, src) @@ -206,19 +242,24 @@ func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) // CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a // []byte. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead. func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { return c.AppendValueBytes(nil, src) } // AppendValueBytes functions the same as CopyValueToBytes, but will append the // result to dst. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { if br, ok := src.(BytesReader); ok { return br.ReadValueBytes(dst) } vw := vwPool.Get().(*valueWriter) - defer vwPool.Put(vw) + defer putValueWriter(vw) start := len(dst) @@ -234,6 +275,9 @@ func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, [] } // CopyValue will copy a single value from src to dst. +// +// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { var err error switch src.Type() { @@ -399,7 +443,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { for { vr, err := ar.ReadValue() - if err == ErrEOA { + if errors.Is(err, ErrEOA) { break } if err != nil { @@ -423,7 +467,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { for { key, vr, err := dr.ReadElement() - if err == ErrEOD { + if errors.Is(err, ErrEOD) { break } if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go index 54c76bf74..bb52a0ec3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -313,7 +313,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { // convert hex to bytes bytes, err := hex.DecodeString(uuidNoHyphens) if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err) + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err) } ejp.advanceState() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go index 35832d73a..59ddfc448 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -7,6 +7,7 @@ package bsonrw import ( + "errors" "fmt" "io" "sync" @@ -16,11 +17,15 @@ import ( ) // ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. type ExtJSONValueReaderPool struct { pool sync.Pool } // NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. 
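For the Copier value helpers deprecated above, the notices point at the top-level value API (bson.MarshalValue and bson.UnmarshalValue). A minimal round-trip sketch:

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	func main() {
		// MarshalValue returns the BSON type tag plus the raw value bytes,
		// the same pair Copier.CopyValueToBytes used to produce.
		t, data, err := bson.MarshalValue("hello")
		if err != nil {
			panic(err)
		}
		var s string
		// UnmarshalValue is the replacement for Copier.CopyValueFromBytes.
		if err := bson.UnmarshalValue(t, data, &s); err != nil {
			panic(err)
		}
		fmt.Println(t, s) // string hello
	}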
func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { return &ExtJSONValueReaderPool{ pool: sync.Pool{ @@ -32,6 +37,8 @@ func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { } // Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { vr := bvrp.pool.Get().(*extJSONValueReader) return vr.reset(r, canonical) @@ -39,6 +46,8 @@ func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReade // Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing // is inserted into the pool and ok will be false. +// +// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { bvr, ok := vr.(*extJSONValueReader) if !ok { @@ -605,7 +614,7 @@ func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { name, t, err := ejvr.p.readKey() if err != nil { - if err == ErrEOD { + if errors.Is(err, ErrEOD) { if ejvr.stack[ejvr.frame].mode == mCodeWithScope { _, err := ejvr.p.peekType() if err != nil { @@ -632,7 +641,7 @@ func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { t, err := ejvr.p.peekType() if err != nil { - if err == ErrEOA { + if errors.Is(err, ErrEOA) { ejvr.pop() } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go index 99ed524b7..bb9303167 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go @@ -23,11 +23,15 @@ import ( ) // ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. type ExtJSONValueWriterPool struct { pool sync.Pool } // NewExtJSONValueWriterPool creates a new pool for ValueWriter instances that write to ExtJSON. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool { return &ExtJSONValueWriterPool{ pool: sync.Pool{ @@ -39,6 +43,8 @@ func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool { } // Get retrieves a ExtJSON ValueWriter from the pool and resets it to use w as the destination. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) ValueWriter { vw := bvwp.pool.Get().(*extJSONValueWriter) if writer, ok := w.(*SliceWriter); ok { @@ -53,6 +59,8 @@ func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) // Put inserts a ValueWriter into the pool. If the ValueWriter is not a ExtJSON ValueWriter, nothing // happens and ok will be false. +// +// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *ExtJSONValueWriterPool) Put(vw ValueWriter) (ok bool) { bvw, ok := vw.(*extJSONValueWriter) if !ok { @@ -80,6 +88,7 @@ type extJSONValueWriter struct { frame int64 canonical bool escapeHTML bool + newlines bool } // NewExtJSONValueWriter creates a ValueWriter that writes Extended JSON to w. 
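A short usage sketch of the Extended JSON writer touched by the next hunk, assuming driver v1.12+ behavior where each top-level document written through an Encoder is terminated with a newline:

	package main

	import (
		"bytes"
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
		"go.mongodb.org/mongo-driver/bson/bsonrw"
	)

	func main() {
		buf := new(bytes.Buffer)
		// Relaxed (non-canonical) Extended JSON, no HTML escaping. Per the
		// hunk below, every top-level document now ends with '\n', matching
		// encoding/json.Encoder when several documents share one writer.
		vw, err := bsonrw.NewExtJSONValueWriter(buf, false, false)
		if err != nil {
			panic(err)
		}
		enc, err := bson.NewEncoder(vw)
		if err != nil {
			panic(err)
		}
		for _, doc := range []bson.D{
			{{Key: "a", Value: int32(1)}},
			{{Key: "b", Value: int32(2)}},
		} {
			if err := enc.Encode(doc); err != nil {
				panic(err)
			}
		}
		fmt.Print(buf.String()) // {"a":1} and {"b":2}, each on its own line
	}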
@@ -88,10 +97,13 @@ func NewExtJSONValueWriter(w io.Writer, canonical, escapeHTML bool) (ValueWriter return nil, errNilWriter } - return newExtJSONWriter(w, canonical, escapeHTML), nil + // Enable newlines for all Extended JSON value writers created by NewExtJSONValueWriter. We + // expect these value writers to be used with an Encoder, which should add newlines after + // encoded Extended JSON documents. + return newExtJSONWriter(w, canonical, escapeHTML, true), nil } -func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWriter { +func newExtJSONWriter(w io.Writer, canonical, escapeHTML, newlines bool) *extJSONValueWriter { stack := make([]ejvwState, 1, 5) stack[0] = ejvwState{mode: mTopLevel} @@ -101,6 +113,7 @@ func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWrit stack: stack, canonical: canonical, escapeHTML: escapeHTML, + newlines: newlines, } } @@ -564,6 +577,12 @@ func (ejvw *extJSONValueWriter) WriteDocumentEnd() error { case mDocument: ejvw.buf = append(ejvw.buf, ',') case mTopLevel: + // If the value writer has newlines enabled, end top-level documents with a newline so that + // multiple documents encoded to the same writer are separated by newlines. That matches the + // Go json.Encoder behavior and also works with bsonrw.NewExtJSONValueReader. + if ejvw.newlines { + ejvw.buf = append(ejvw.buf, '\n') + } if ejvw.w != nil { if _, err := ejvw.w.Write(ejvw.buf); err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go index cd4843a3a..43f3e4f38 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go @@ -58,7 +58,7 @@ func (js *jsonScanner) nextToken() (*jsonToken, error) { c, err = js.readNextByte() } - if err == io.EOF { + if errors.Is(err, io.EOF) { return &jsonToken{t: jttEOF}, nil } else if err != nil { return nil, err @@ -198,7 +198,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { for { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -209,7 +209,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { case '\\': c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -248,7 +248,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { if utf16.IsSurrogate(rn) { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -264,7 +264,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -325,17 +325,17 @@ func (js *jsonScanner) scanLiteral(first byte) (*jsonToken, error) { c5, err := js.readNextByte() - if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || err == io.EOF) { + if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttBool, v: true, p: p}, nil - } else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || err == io.EOF) { + } else if bytes.Equal([]byte("null"), 
lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttNull, v: nil, p: p}, nil } else if bytes.Equal([]byte("fals"), lit) { if c5 == 'e' { c5, err = js.readNextByte() - if isValueTerminator(c5) || err == io.EOF { + if isValueTerminator(c5) || errors.Is(err, io.EOF) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttBool, v: false, p: p}, nil } @@ -384,7 +384,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { for { c, err = js.readNextByte() - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return nil, err } @@ -413,7 +413,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else { s = nssInvalid @@ -430,7 +430,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawIntegerDigits @@ -455,7 +455,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawFractionDigits @@ -490,7 +490,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawExponentDigits diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go index 0b8fa28d5..324b10b61 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go @@ -58,6 +58,8 @@ type ValueReader interface { // types that implement ValueReader may also implement this interface. // // The bytes of the value will be appended to dst. +// +// Deprecated: BytesReader will not be supported in Go Driver 2.0. type BytesReader interface { ReadValueBytes(dst []byte) (bsontype.Type, []byte, error) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index ef5d837c2..a242bb57c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -28,11 +28,15 @@ var vrPool = sync.Pool{ } // BSONValueReaderPool is a pool for ValueReaders that read BSON. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. type BSONValueReaderPool struct { pool sync.Pool } // NewBSONValueReaderPool instantiates a new BSONValueReaderPool. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. func NewBSONValueReaderPool() *BSONValueReaderPool { return &BSONValueReaderPool{ pool: sync.Pool{ @@ -44,6 +48,8 @@ func NewBSONValueReaderPool() *BSONValueReaderPool { } // Get retrieves a ValueReader from the pool and uses src as the underlying BSON. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. 
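The scanner and reader hunks in this area consistently replace == comparisons against sentinel errors (io.EOF, ErrEOD, ErrEOA) with errors.Is, which also matches sentinels wrapped via %w; a tiny illustration:

	package main

	import (
		"errors"
		"fmt"
		"io"
	)

	func main() {
		// errors.Is walks the wrap chain, so a sentinel wrapped with %w
		// still matches; a plain == comparison would miss it.
		wrapped := fmt.Errorf("scan failed: %w", io.EOF)
		fmt.Println(wrapped == io.EOF)          // false
		fmt.Println(errors.Is(wrapped, io.EOF)) // true
	}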
func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader { vr := bvrp.pool.Get().(*valueReader) vr.reset(src) @@ -52,6 +58,8 @@ func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader { // Put inserts a ValueReader into the pool. If the ValueReader is not a BSON ValueReader nothing // is inserted into the pool and ok will be false. +// +// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0. func (bvrp *BSONValueReaderPool) Put(vr ValueReader) (ok bool) { bvr, ok := vr.(*valueReader) if !ok { @@ -731,8 +739,7 @@ func (vr *valueReader) ReadValue() (ValueReader, error) { return nil, ErrEOA } - _, err = vr.readCString() - if err != nil { + if err := vr.skipCString(); err != nil { return nil, err } @@ -786,6 +793,15 @@ func (vr *valueReader) readByte() (byte, error) { return vr.d[vr.offset-1], nil } +func (vr *valueReader) skipCString() error { + idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) + if idx < 0 { + return io.EOF + } + vr.offset += int64(idx) + 1 + return nil +} + func (vr *valueReader) readCString() (string, error) { idx := bytes.IndexByte(vr.d[vr.offset:], 0x00) if idx < 0 { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go index f95a08afd..311518a80 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go @@ -28,12 +28,23 @@ var vwPool = sync.Pool{ }, } +func putValueWriter(vw *valueWriter) { + if vw != nil { + vw.w = nil // don't leak the writer + vwPool.Put(vw) + } +} + // BSONValueWriterPool is a pool for BSON ValueWriters. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. type BSONValueWriterPool struct { pool sync.Pool } // NewBSONValueWriterPool creates a new pool for ValueWriter instances that write to BSON. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func NewBSONValueWriterPool() *BSONValueWriterPool { return &BSONValueWriterPool{ pool: sync.Pool{ @@ -45,6 +56,8 @@ func NewBSONValueWriterPool() *BSONValueWriterPool { } // Get retrieves a BSON ValueWriter from the pool and resets it to use w as the destination. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter { vw := bvwp.pool.Get().(*valueWriter) @@ -56,6 +69,8 @@ func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter { } // GetAtModeElement retrieves a ValueWriterFlusher from the pool and resets it to use w as the destination. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlusher { vw := bvwp.Get(w).(*valueWriter) vw.push(mElement) @@ -64,6 +79,8 @@ func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlushe // Put inserts a ValueWriter into the pool. If the ValueWriter is not a BSON ValueWriter, nothing // happens and ok will be false. +// +// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0. 
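The putValueWriter helper above nils out the writer reference before pooling so the pool cannot keep a caller's io.Writer reachable. The same reset-before-Put discipline applies to any sync.Pool; an illustrative standalone sketch, not driver code:

	package main

	import (
		"bytes"
		"fmt"
		"sync"
	)

	var bufPool = sync.Pool{
		New: func() interface{} { return new(bytes.Buffer) },
	}

	// putBuffer clears the buffer before pooling it, mirroring putValueWriter:
	// anything still referenced by a pooled object stays reachable by the GC,
	// so references are dropped (or reset) at Put time, not at Get time.
	func putBuffer(b *bytes.Buffer) {
		b.Reset()
		bufPool.Put(b)
	}

	func main() {
		b := bufPool.Get().(*bytes.Buffer)
		b.WriteString("pooled")
		fmt.Println(b.String())
		putBuffer(b)
	}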
func (bvwp *BSONValueWriterPool) Put(vw ValueWriter) (ok bool) { bvw, ok := vw.(*valueWriter) if !ok { @@ -139,32 +156,21 @@ type valueWriter struct { } func (vw *valueWriter) advanceFrame() { - if vw.frame+1 >= int64(len(vw.stack)) { // We need to grow the stack - length := len(vw.stack) - if length+1 >= cap(vw.stack) { - // double it - buf := make([]vwState, 2*cap(vw.stack)+1) - copy(buf, vw.stack) - vw.stack = buf - } - vw.stack = vw.stack[:length+1] - } vw.frame++ + if vw.frame >= int64(len(vw.stack)) { + vw.stack = append(vw.stack, vwState{}) + } } func (vw *valueWriter) push(m mode) { vw.advanceFrame() // Clean the stack - vw.stack[vw.frame].mode = m - vw.stack[vw.frame].key = "" - vw.stack[vw.frame].arrkey = 0 - vw.stack[vw.frame].start = 0 + vw.stack[vw.frame] = vwState{mode: m} - vw.stack[vw.frame].mode = m switch m { case mDocument, mArray, mCodeWithScope: - vw.reserveLength() + vw.reserveLength() // WARN: this is not needed } } @@ -203,6 +209,7 @@ func newValueWriter(w io.Writer) *valueWriter { return vw } +// TODO: only used in tests func newValueWriterFromSlice(buf []byte) *valueWriter { vw := new(valueWriter) stack := make([]vwState, 1, 5) @@ -239,17 +246,16 @@ func (vw *valueWriter) invalidTransitionError(destination mode, name string, mod } func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error { - switch vw.stack[vw.frame].mode { + frame := &vw.stack[vw.frame] + switch frame.mode { case mElement: - key := vw.stack[vw.frame].key + key := frame.key if !isValidCString(key) { return errors.New("BSON element key cannot contain null bytes") } - - vw.buf = bsoncore.AppendHeader(vw.buf, t, key) + vw.appendHeader(t, key) case mValue: - // TODO: Do this with a cache of the first 1000 or so array keys. - vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey)) + vw.appendIntHeader(t, frame.arrkey) default: modes := []mode{mElement, mValue} if addmodes != nil { @@ -591,9 +597,11 @@ func (vw *valueWriter) writeLength() error { if length > maxSize { return errMaxDocumentSizeExceeded{size: int64(len(vw.buf))} } - length = length - int(vw.stack[vw.frame].start) - start := vw.stack[vw.frame].start + frame := &vw.stack[vw.frame] + length = length - int(frame.start) + start := frame.start + _ = vw.buf[start+3] // BCE vw.buf[start+0] = byte(length) vw.buf[start+1] = byte(length >> 8) vw.buf[start+2] = byte(length >> 16) @@ -602,5 +610,31 @@ func (vw *valueWriter) writeLength() error { } func isValidCString(cs string) bool { - return !strings.ContainsRune(cs, '\x00') + // Disallow the zero byte in a cstring because the zero byte is used as the + // terminating character. + // + // It's safe to check bytes instead of runes because all multibyte UTF-8 + // code points start with (binary) 11xxxxxx or 10xxxxxx, so 00000000 (i.e. + // 0) will never be part of a multibyte UTF-8 code point. This logic is the + // same as the "r < utf8.RuneSelf" case in strings.IndexRune but can be + // inlined. + // + // https://cs.opensource.google/go/go/+/refs/tags/go1.21.1:src/strings/strings.go;l=127 + return strings.IndexByte(cs, 0) == -1 +} + +// appendHeader is the same as bsoncore.AppendHeader but does not check if the +// key is a valid C string since the caller has already checked for that. +// +// The caller of this function must check if key is a valid C string. +func (vw *valueWriter) appendHeader(t bsontype.Type, key string) { + vw.buf = bsoncore.AppendType(vw.buf, t) + vw.buf = append(vw.buf, key...) 
+ vw.buf = append(vw.buf, 0x00) +} + +func (vw *valueWriter) appendIntHeader(t bsontype.Type, key int) { + vw.buf = bsoncore.AppendType(vw.buf, t) + vw.buf = strconv.AppendInt(vw.buf, int64(key), 10) + vw.buf = append(vw.buf, 0x00) } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go index dff65f87f..628f45293 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go @@ -56,6 +56,8 @@ type ValueWriter interface { } // ValueWriterFlusher is a superset of ValueWriter that exposes functionality to flush to the underlying buffer. +// +// Deprecated: ValueWriterFlusher will not be supported in Go Driver 2.0. type ValueWriterFlusher interface { ValueWriter Flush() error @@ -64,13 +66,20 @@ type ValueWriterFlusher interface { // BytesWriter is the interface used to write BSON bytes to a ValueWriter. // This interface is meant to be a superset of ValueWriter, so that types that // implement ValueWriter may also implement this interface. +// +// Deprecated: BytesWriter will not be supported in Go Driver 2.0. type BytesWriter interface { WriteValueBytes(t bsontype.Type, b []byte) error } // SliceWriter allows a pointer to a slice of bytes to be used as an io.Writer. +// +// Deprecated: SliceWriter will not be supported in Go Driver 2.0. type SliceWriter []byte +// Write writes the bytes to the underlying slice. +// +// Deprecated: SliceWriter will not be supported in Go Driver 2.0. func (sw *SliceWriter) Write(p []byte) (int, error) { written := len(p) *sw = append(*sw, p...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go index 7c91ae518..255d9909e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go @@ -8,7 +8,9 @@ // a stringifier for the Type to enable easier debugging when working with BSON. package bsontype // import "go.mongodb.org/mongo-driver/bson/bsontype" -// These constants uniquely refer to each BSON type. +// BSON element types as described in https://bsonspec.org/spec.html. +// +// Deprecated: Use bson.Type* constants instead. const ( Double Type = 0x01 String Type = 0x02 @@ -31,7 +33,12 @@ const ( Decimal128 Type = 0x13 MinKey Type = 0xFF MaxKey Type = 0x7F +) +// BSON binary element subtypes as described in https://bsonspec.org/spec.html. +// +// Deprecated: Use the bson.TypeBinary* constants instead. +const ( BinaryGeneric byte = 0x00 BinaryFunction byte = 0x01 BinaryBinaryOld byte = 0x02 @@ -40,6 +47,7 @@ const ( BinaryMD5 byte = 0x05 BinaryEncrypted byte = 0x06 BinaryColumn byte = 0x07 + BinarySensitive byte = 0x08 BinaryUserDefined byte = 0x80 ) @@ -95,3 +103,14 @@ func (bt Type) String() string { return "invalid" } } + +// IsValid will return true if the Type is valid. 
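Relatedly, the isValidCString rewrite in the value_writer.go hunk above relies on a UTF-8 property: every byte of a multibyte code point has its high bit set, so a 0x00 byte can only ever be the one-byte NUL rune. A small sketch confirming that a plain byte scan is therefore sufficient:

	package main

	import (
		"fmt"
		"strings"
	)

	// isValidCString reports whether cs can be a BSON cstring, i.e. contains no
	// NUL byte. Scanning bytes (not runes) is safe because 0x00 never occurs
	// inside a multibyte UTF-8 sequence.
	func isValidCString(cs string) bool {
		return strings.IndexByte(cs, 0) == -1
	}

	func main() {
		fmt.Println(isValidCString("héllo"))      // true: multibyte runes are fine
		fmt.Println(isValidCString("bad\x00key")) // false: embedded NUL
	}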
+func (bt Type) IsValid() bool { + switch bt { + case Double, String, EmbeddedDocument, Array, Binary, Undefined, ObjectID, Boolean, DateTime, Null, Regex, + DBPointer, JavaScript, Symbol, CodeWithScope, Int32, Timestamp, Int64, Decimal128, MinKey, MaxKey: + return true + default: + return false + } +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go index 6e189fa58..eac74cd39 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go @@ -38,6 +38,12 @@ type Decoder struct { // (*Decoder).SetContext. defaultDocumentM bool defaultDocumentD bool + + binaryAsSlice bool + useJSONStructTags bool + useLocalTimeZone bool + zeroMaps bool + zeroStructs bool } // NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr. @@ -53,6 +59,9 @@ func NewDecoder(vr bsonrw.ValueReader) (*Decoder, error) { } // NewDecoderWithContext returns a new decoder that uses DecodeContext dc to read from vr. +// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal +// behavior instead. func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (*Decoder, error) { if dc.Registry == nil { dc.Registry = DefaultRegistry @@ -70,8 +79,7 @@ func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (* // Decode reads the next BSON document from the stream and decodes it into the // value pointed to by val. // -// The documentation for Unmarshal contains details about of BSON into a Go -// value. +// See [Unmarshal] for details about BSON unmarshaling behavior. func (d *Decoder) Decode(val interface{}) error { if unmarshaler, ok := val.(Unmarshaler); ok { // TODO(skriptble): Reuse a []byte here and use the AppendDocumentBytes method. @@ -100,42 +108,101 @@ func (d *Decoder) Decode(val interface{}) error { if err != nil { return err } + if d.defaultDocumentM { d.dc.DefaultDocumentM() } if d.defaultDocumentD { d.dc.DefaultDocumentD() } + if d.binaryAsSlice { + d.dc.BinaryAsSlice() + } + if d.useJSONStructTags { + d.dc.UseJSONStructTags() + } + if d.useLocalTimeZone { + d.dc.UseLocalTimeZone() + } + if d.zeroMaps { + d.dc.ZeroMaps() + } + if d.zeroStructs { + d.dc.ZeroStructs() + } + return decoder.DecodeValue(d.dc, d.vr, rval) } // Reset will reset the state of the decoder, using the same *DecodeContext used in // the original construction but using vr for reading. func (d *Decoder) Reset(vr bsonrw.ValueReader) error { + // TODO:(GODRIVER-2719): Remove error return value. d.vr = vr return nil } // SetRegistry replaces the current registry of the decoder with r. func (d *Decoder) SetRegistry(r *bsoncodec.Registry) error { + // TODO:(GODRIVER-2719): Remove error return value. d.dc.Registry = r return nil } // SetContext replaces the current registry of the decoder with dc. +// +// Deprecated: Use the Decoder configuration methods to set the desired unmarshal behavior instead. func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error { + // TODO:(GODRIVER-2719): Remove error return value. d.dc = dc return nil } -// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
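The new Decoder fields above are copied into the DecodeContext inside Decode, so unmarshal behavior can now be configured with methods rather than a hand-built DecodeContext. A usage sketch against the v1 API (the document contents here are only illustrative):

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
		"go.mongodb.org/mongo-driver/bson/bsonrw"
	)

	func main() {
		data, err := bson.Marshal(bson.M{"n": 3.14})
		if err != nil {
			panic(err)
		}

		dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
		if err != nil {
			panic(err)
		}
		// Configuration methods replace NewDecoderWithContext / SetContext.
		dec.DefaultDocumentM()       // decode documents into primitive.M
		dec.AllowTruncatingDoubles() // permit 3.14 -> 3 for integer fields

		var out interface{}
		if err := dec.Decode(&out); err != nil {
			panic(err)
		}
		fmt.Printf("%T %v\n", out, out) // primitive.M map[n:3.14]
	}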
func (d *Decoder) DefaultDocumentM() { d.defaultDocumentM = true } -// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as -// "interface{}" or "map[string]interface{}". +// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This +// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". func (d *Decoder) DefaultDocumentD() { d.defaultDocumentD = true } + +// AllowTruncatingDoubles causes the Decoder to truncate the fractional part of BSON "double" values +// when attempting to unmarshal them into a Go integer (int, int8, int16, int32, or int64) struct +// field. The truncation logic does not apply to BSON "decimal128" values. +func (d *Decoder) AllowTruncatingDoubles() { + d.dc.Truncate = true +} + +// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or +// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. +func (d *Decoder) BinaryAsSlice() { + d.binaryAsSlice = true +} + +// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +func (d *Decoder) UseJSONStructTags() { + d.useJSONStructTags = true +} + +// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead +// of the UTC timezone. +func (d *Decoder) UseLocalTimeZone() { + d.useLocalTimeZone = true +} + +// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value +// passed to Decode before unmarshaling BSON documents into them. +func (d *Decoder) ZeroMaps() { + d.zeroMaps = true +} + +// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination +// value passed to Decode before unmarshaling BSON documents into them. +func (d *Decoder) ZeroStructs() { + d.zeroStructs = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 0134006d8..af6098475 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -6,8 +6,9 @@ // Package bson is a library for reading, writing, and manipulating BSON. BSON is a binary serialization format used to // store documents and make remote procedure calls in MongoDB. The BSON specification is located at https://bsonspec.org. -// The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description -// of the codec system and examples of registering custom codecs, see the bsoncodec package. +// The BSON library handles marshaling and unmarshaling of values through a configurable codec system. For a description +// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information +// and usage examples, check out the [Work with BSON] page in the Go Driver docs site. // // # Raw BSON // @@ -37,7 +38,7 @@ // bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} // bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} // -// When decoding BSON to a D or M, the following type mappings apply when unmarshalling: +// When decoding BSON to a D or M, the following type mappings apply when unmarshaling: // // 1. BSON int32 unmarshals to an int32. // 2. BSON int64 unmarshals to an int64. @@ -61,81 +62,78 @@ // 20. BSON DBPointer unmarshals to a primitive.DBPointer. // 21. 
BSON symbol unmarshals to a primitive.Symbol. // -// The above mappings also apply when marshalling a D or M to BSON. Some other useful marshalling mappings are: +// The above mappings also apply when marshaling a D or M to BSON. Some other useful marshaling mappings are: // // 1. time.Time marshals to a BSON datetime. // 2. int8, int16, and int32 marshal to a BSON int32. // 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 // otherwise. -// 4. int64 marshals to BSON int64. +// 4. int64 marshals to BSON int64 (unless [Encoder.IntMinSize] is set). // 5. uint8 and uint16 marshal to a BSON int32. -// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, -// inclusive, and BSON int64 otherwise. -// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or +// 6. uint, uint32, and uint64 marshal to a BSON int64 (unless [Encoder.IntMinSize] is set). +// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshaling a BSON null or // undefined value into a string will yield the empty string.). // // # Structs // -// Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended +// Structs can be marshaled/unmarshaled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended // JSON, the following rules apply: // -// 1. Only exported fields in structs will be marshalled or unmarshalled. +// 1. Only exported fields in structs will be marshaled or unmarshaled. // -// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. +// 2. When marshaling a struct, each field will be lowercased to generate the key for the corresponding BSON element. // For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g. // `bson:"fooField"` to generate key "fooField" instead). // -// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. +// 3. An embedded struct field is marshaled as a subdocument. The key will be the lowercased name of the field's type. // -// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is -// marshalled as a BSON null value. +// 4. A pointer field is marshaled as the underlying type if the pointer is non-nil. If the pointer is nil, it is +// marshaled as a BSON null value. // -// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents -// unmarshalled into an interface{} field will be unmarshalled as a D. +// 5. When unmarshaling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents +// unmarshaled into an interface{} field will be unmarshaled as a D. // // The encoding of each struct field can be customized by the "bson" struct tag. // // This tag behavior is configurable, and different struct tag behavior can be configured by initializing a new -// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. 
By default, JSON tags -// are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: +// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON +// tags are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: // // Example: // // structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) // // The bson tag gives the name of the field, possibly followed by a comma-separated list of options. -// The name may be empty in order to specify options without overriding the default field name. The following options can be used -// to configure behavior: -// -// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to -// the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if -// their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings). -// Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered -// empty if their value is nil. By default, structs are only considered empty if the struct type implements the -// bsoncodec.Zeroer interface and the IsZero method returns true. Struct fields whose types do not implement Zeroer are -// never considered empty and will be marshalled as embedded documents. +// The name may be empty in order to specify options without overriding the default field name. The following options can +// be used to configure behavior: +// +// 1. omitempty: If the omitempty struct tag is specified on a field, the field will be omitted from the marshaling if +// the field has an empty value, defined as false, 0, a nil pointer, a nil interface value, and any empty array, +// slice, map, or string. // NOTE: It is recommended that this tag be used for all slice and map fields. // // 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of -// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other -// types, this tag is ignored. +// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For +// other types, this tag is ignored. // -// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled -// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, -// it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be -// decoded without losing precision. For float64 or non-numeric types, this tag is ignored. +// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles +// unmarshaled into that field will be truncated at the decimal point. For example, if 3.14 is unmarshaled into a +// field of type int, it will be unmarshaled as 3. If this tag is not specified, the decoder will throw an error if +// the value cannot be decoded without losing precision. For float64 or non-numeric types, this tag is ignored. // // 4. 
inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when -// marshalling and "un-flattened" when unmarshalling. This means that all of the fields in that struct/map will be -// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a -// map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be -// {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If there are -// duplicated fields in the resulting document when an inlined struct is marshalled, the inlined field will be overwritten. -// If there are duplicated fields in the resulting document when an inlined map is marshalled, an error will be returned. -// This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be -// marshalled. For fields that are not maps or structs, this tag is ignored. -// -// # Marshalling and Unmarshalling -// -// Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. +// marshaling and "un-flattened" when unmarshaling. This means that all of the fields in that struct/map will be +// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, +// if a map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will +// be {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If +// there are duplicated fields in the resulting document when an inlined struct is marshaled, the inlined field will +// be overwritten. If there are duplicated fields in the resulting document when an inlined map is marshaled, an +// error will be returned. This tag can be used with fields that are pointers to structs. If an inlined pointer field +// is nil, it will not be marshaled. For fields that are not maps or structs, this tag is ignored. +// +// # Marshaling and Unmarshaling +// +// Manually marshaling and unmarshaling can be done with the Marshal and Unmarshal family of functions. +// +// [Work with BSON]: https://www.mongodb.com/docs/drivers/go/current/fundamentals/bson/ package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/encoder.go b/vendor/go.mongodb.org/mongo-driver/bson/encoder.go index fe5125d08..0be2a97fb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/encoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/encoder.go @@ -29,10 +29,20 @@ var encPool = sync.Pool{ type Encoder struct { ec bsoncodec.EncodeContext vw bsonrw.ValueWriter + + errorOnInlineDuplicates bool + intMinSize bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool } // NewEncoder returns a new encoder that uses the DefaultRegistry to write to vw. func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) { + // TODO:(GODRIVER-2719): Remove error return value. if vw == nil { return nil, errors.New("cannot create a new Encoder with a nil ValueWriter") } @@ -44,6 +54,9 @@ func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) { } // NewEncoderWithContext returns a new encoder that uses EncodeContext ec to write to vw. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead. 
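The Encoder side mirrors this: the fields added above are copied into the EncodeContext during Encode, so marshal behavior is set with methods instead of NewEncoderWithContext. A usage sketch (buffer and field names are illustrative):

	package main

	import (
		"bytes"
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
		"go.mongodb.org/mongo-driver/bson/bsonrw"
	)

	func main() {
		buf := new(bytes.Buffer)
		vw, err := bsonrw.NewBSONValueWriter(buf)
		if err != nil {
			panic(err)
		}
		enc, err := bson.NewEncoder(vw)
		if err != nil {
			panic(err)
		}
		// Configuration methods replace NewEncoderWithContext / SetContext.
		enc.IntMinSize()    // marshal small int64s as BSON int32
		enc.NilMapAsEmpty() // marshal nil maps as {} instead of null

		if err := enc.Encode(bson.M{"n": int64(1), "m": map[string]string(nil)}); err != nil {
			panic(err)
		}
		fmt.Printf("% x\n", buf.Bytes())
	}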
func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (*Encoder, error) { if ec.Registry == nil { ec = bsoncodec.EncodeContext{Registry: DefaultRegistry} @@ -60,8 +73,7 @@ func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (* // Encode writes the BSON encoding of val to the stream. // -// The documentation for Marshal contains details about the conversion of Go -// values to BSON. +// See [Marshal] for details about BSON marshaling behavior. func (e *Encoder) Encode(val interface{}) error { if marshaler, ok := val.(Marshaler); ok { // TODO(skriptble): Should we have a MarshalAppender interface so that we can have []byte reuse? @@ -76,24 +88,112 @@ func (e *Encoder) Encode(val interface{}) error { if err != nil { return err } + + // Copy the configurations applied to the Encoder over to the EncodeContext, which actually + // communicates those configurations to the default ValueEncoders. + if e.errorOnInlineDuplicates { + e.ec.ErrorOnInlineDuplicates() + } + if e.intMinSize { + e.ec.MinSize = true + } + if e.stringifyMapKeysWithFmt { + e.ec.StringifyMapKeysWithFmt() + } + if e.nilMapAsEmpty { + e.ec.NilMapAsEmpty() + } + if e.nilSliceAsEmpty { + e.ec.NilSliceAsEmpty() + } + if e.nilByteSliceAsEmpty { + e.ec.NilByteSliceAsEmpty() + } + if e.omitZeroStruct { + e.ec.OmitZeroStruct() + } + if e.useJSONStructTags { + e.ec.UseJSONStructTags() + } + return encoder.EncodeValue(e.ec, e.vw, reflect.ValueOf(val)) } -// Reset will reset the state of the encoder, using the same *EncodeContext used in +// Reset will reset the state of the Encoder, using the same *EncodeContext used in // the original construction but using vw. func (e *Encoder) Reset(vw bsonrw.ValueWriter) error { + // TODO:(GODRIVER-2719): Remove error return value. e.vw = vw return nil } -// SetRegistry replaces the current registry of the encoder with r. +// SetRegistry replaces the current registry of the Encoder with r. func (e *Encoder) SetRegistry(r *bsoncodec.Registry) error { + // TODO:(GODRIVER-2719): Remove error return value. e.ec.Registry = r return nil } -// SetContext replaces the current EncodeContext of the encoder with er. +// SetContext replaces the current EncodeContext of the encoder with ec. +// +// Deprecated: Use the Encoder configuration methods to set the desired marshal behavior instead. func (e *Encoder) SetContext(ec bsoncodec.EncodeContext) error { + // TODO:(GODRIVER-2719): Remove error return value. e.ec = ec return nil } + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. +func (e *Encoder) ErrorOnInlineDuplicates() { + e.errorOnInlineDuplicates = true +} + +// IntMinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, uint, +// uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) that can +// represent the integer value. +func (e *Encoder) IntMinSize() { + e.intMinSize = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprint instead of the default string conversion logic. +func (e *Encoder) StringifyMapKeysWithFmt() { + e.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null.
+func (e *Encoder) NilMapAsEmpty() { + e.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. +func (e *Encoder) NilSliceAsEmpty() { + e.nilSliceAsEmpty = true +} + +// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values +// instead of BSON null. +func (e *Encoder) NilByteSliceAsEmpty() { + e.nilByteSliceAsEmpty = true +} + +// TODO(GODRIVER-2820): Update the description to remove the note about only examining exported +// TODO struct fields once the logic is updated to also inspect private struct fields. + +// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) +// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. +// +// Note that the Encoder only examines exported struct fields when determining if a struct is the +// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. +func (e *Encoder) OmitZeroStruct() { + e.omitZeroStruct = true +} + +// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +func (e *Encoder) UseJSONStructTags() { + e.useJSONStructTags = true +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go index db8d8ee92..17ce6697e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go @@ -9,6 +9,7 @@ package bson import ( "bytes" "encoding/json" + "sync" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsonrw" @@ -20,17 +21,23 @@ const defaultDstCap = 256 var bvwPool = bsonrw.NewBSONValueWriterPool() var extjPool = bsonrw.NewExtJSONValueWriterPool() -// Marshaler is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. +// Marshaler is the interface implemented by types that can marshal themselves +// into a valid BSON document. +// +// Implementations of Marshaler must return a full BSON document. To create +// custom BSON marshaling behavior for individual values in a BSON document, +// implement the ValueMarshaler interface instead. type Marshaler interface { MarshalBSON() ([]byte, error) } -// ValueMarshaler is an interface implemented by types that can marshal -// themselves into a BSON value as bytes. The type must be the valid type for -// the bytes returned. The bytes and byte type together must be valid if the -// error is nil. +// ValueMarshaler is the interface implemented by types that can marshal +// themselves into a valid BSON value. The format of the returned bytes must +// match the returned type. +// +// Implementations of ValueMarshaler must return an individual BSON value. To +// create custom BSON marshaling behavior for an entire BSON document, implement +// the Marshaler interface instead. type ValueMarshaler interface { MarshalBSONValue() (bsontype.Type, []byte, error) } @@ -48,12 +55,42 @@ func Marshal(val interface{}) ([]byte, error) { // MarshalAppend will encode val as a BSON document and append the bytes to dst. If dst is not large enough to hold the // bytes, it will be grown. If val is not a type that can be transformed into a document, MarshalValueAppend should be // used instead. 
+// +// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter]: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalAppend(dst []byte, val interface{}) ([]byte, error) { return MarshalAppendWithRegistry(DefaultRegistry, dst, val) } // MarshalWithRegistry returns the BSON encoding of val as a BSON document. If val is not a type that can be transformed // into a document, MarshalValueWithRegistry should be used instead. +// +// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) { dst := make([]byte, 0) return MarshalAppendWithRegistry(r, dst, val) @@ -61,6 +98,22 @@ func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) // MarshalWithContext returns the BSON encoding of val as a BSON document using EncodeContext ec. If val is not a type // that can be transformed into a document, MarshalValueWithContext should be used instead. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, error) { dst := make([]byte, 0) return MarshalAppendWithContext(ec, dst, val) @@ -69,16 +122,74 @@ func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, er // MarshalAppendWithRegistry will encode val as a BSON document using Registry r and append the bytes to dst. If dst is // not large enough to hold the bytes, it will be grown. If val is not a type that can be transformed into a document, // MarshalValueAppendWithRegistry should be used instead. +// +// Deprecated: Use [NewEncoder], and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) ([]byte, error) { return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val) } +// Pool of buffers for marshalling BSON. +var bufPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + // MarshalAppendWithContext will encode val as a BSON document using Registry r and EncodeContext ec and append the // bytes to dst. If dst is not large enough to hold the bytes, it will be grown. 
If val is not a type that can be // transformed into a document, MarshalValueAppendWithContext should be used instead. +// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewBSONValueWriter], and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) { - sw := new(bsonrw.SliceWriter) - *sw = dst + sw := bufPool.Get().(*bytes.Buffer) + defer func() { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum + // buffer to place back in the pool. We limit the size to 16MiB because + // that's the maximum wire message size supported by any current MongoDB + // server. + // + // Comment based on + // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/fmt/print.go;l=147 + // + // Recycle byte slices that are smaller than 16MiB and at least half + // occupied. + if sw.Cap() < 16*1024*1024 && sw.Cap()/2 < sw.Len() { + bufPool.Put(sw) + } + }() + + sw.Reset() vw := bvwPool.Get(sw) defer bvwPool.Put(vw) @@ -99,7 +210,7 @@ func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interf return nil, err } - return *sw, nil + return append(dst, sw.Bytes()...), nil } // MarshalValue returns the BSON encoding of val. @@ -112,17 +223,26 @@ func MarshalValue(val interface{}) (bsontype.Type, []byte, error) { // MarshalValueAppend will append the BSON encoding of val to dst. If dst is not large enough to hold the BSON encoding // of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func MarshalValueAppend(dst []byte, val interface{}) (bsontype.Type, []byte, error) { return MarshalValueAppendWithRegistry(DefaultRegistry, dst, val) } // MarshalValueWithRegistry returns the BSON encoding of val using Registry r. +// +// Deprecated: Using a custom registry to marshal individual BSON values will not be supported in Go +// Driver 2.0. func MarshalValueWithRegistry(r *bsoncodec.Registry, val interface{}) (bsontype.Type, []byte, error) { dst := make([]byte, 0) return MarshalValueAppendWithRegistry(r, dst, val) } // MarshalValueWithContext returns the BSON encoding of val using EncodeContext ec. +// +// Deprecated: Using a custom EncodeContext to marshal individual BSON elements will not be +// supported in Go Driver 2.0. func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsontype.Type, []byte, error) { dst := make([]byte, 0) return MarshalValueAppendWithContext(ec, dst, val) @@ -130,12 +250,18 @@ func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsont // MarshalValueAppendWithRegistry will append the BSON encoding of val to dst using Registry r. If dst is not large // enough to hold the BSON encoding of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. 
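The bufPool recycling rule above is subtle enough to restate: only buffers smaller than 16MiB and at least half occupied go back to the pool, which keeps pooled entries at a roughly uniform cost. A standalone sketch of just that heuristic (names are hypothetical):

	package main

	import (
		"bytes"
		"fmt"
		"sync"
	)

	var bufPool = sync.Pool{
		New: func() interface{} { return new(bytes.Buffer) },
	}

	// putBuffer returns buf to the pool only if it is smaller than 16MiB and at
	// least half occupied, the same heuristic used in MarshalAppendWithContext.
	func putBuffer(buf *bytes.Buffer) {
		if buf.Cap() < 16*1024*1024 && buf.Cap()/2 < buf.Len() {
			bufPool.Put(buf)
		}
	}

	func main() {
		buf := bufPool.Get().(*bytes.Buffer)
		buf.Reset()
		buf.WriteString("hello")
		fmt.Println("cap:", buf.Cap(), "len:", buf.Len())
		putBuffer(buf) // pooled only if it passes the size/occupancy check
	}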
func MarshalValueAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) (bsontype.Type, []byte, error) { return MarshalValueAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val) } // MarshalValueAppendWithContext will append the BSON encoding of val to dst using EncodeContext ec. If dst is not large // enough to hold the BSON encoding of val, dst will be grown. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. func MarshalValueAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) (bsontype.Type, []byte, error) { // get a ValueWriter configured to write to dst sw := new(bsonrw.SliceWriter) @@ -173,17 +299,63 @@ func MarshalExtJSON(val interface{}, canonical, escapeHTML bool) ([]byte, error) // MarshalExtJSONAppend will append the extended JSON encoding of val to dst. // If dst is not large enough to hold the extended JSON encoding of val, dst // will be grown. +// +// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter] instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalExtJSONAppend(dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { return MarshalExtJSONAppendWithRegistry(DefaultRegistry, dst, val, canonical, escapeHTML) } // MarshalExtJSONWithRegistry returns the extended JSON encoding of val using Registry r. +// +// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.SetRegistry(reg) +// +// See [Encoder] for more examples. func MarshalExtJSONWithRegistry(r *bsoncodec.Registry, val interface{}, canonical, escapeHTML bool) ([]byte, error) { dst := make([]byte, 0, defaultDstCap) return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML) } // MarshalExtJSONWithContext returns the extended JSON encoding of val using Registry r. +// +// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := new(bytes.Buffer) +// vw, err := bsonrw.NewBSONValueWriter(buf) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, canonical, escapeHTML bool) ([]byte, error) { dst := make([]byte, 0, defaultDstCap) return MarshalExtJSONAppendWithContext(ec, dst, val, canonical, escapeHTML) @@ -192,6 +364,22 @@ func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, cano // MarshalExtJSONAppendWithRegistry will append the extended JSON encoding of // val to dst using Registry r. If dst is not large enough to hold the BSON // encoding of val, dst will be grown. 
+// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry] +// instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// +// See [Encoder] for more examples. func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML) } @@ -199,6 +387,23 @@ func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val int // MarshalExtJSONAppendWithContext will append the extended JSON encoding of // val to dst using Registry r. If dst is not large enough to hold the BSON // encoding of val, dst will be grown. +// +// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into +// [bsonrw.NewExtJSONValueWriter], and use the Encoder configuration methods to set the desired marshal +// behavior instead: +// +// buf := bytes.NewBuffer(dst) +// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false) +// if err != nil { +// panic(err) +// } +// enc, err := bson.NewEncoder(vw) +// if err != nil { +// panic(err) +// } +// enc.IntMinSize() +// +// See [Encoder] for more examples. func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) { sw := new(bsonrw.SliceWriter) *sw = dst diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go index ba7c9112e..08c39514b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go @@ -164,9 +164,6 @@ func (d Decimal128) BigInt() (*big.Int, int, error) { // Would be handled by the logic below, but that's trivial and common. if high == 0 && low == 0 && exp == 0 { - if posSign { - return new(big.Int), 0, nil - } return new(big.Int), 0, nil } @@ -328,6 +325,7 @@ func ParseDecimal128(s string) (Decimal128, error) { return dErr(s) } + // Parse the significand (i.e. the non-exponent part) as a big.Int. bi, ok := new(big.Int).SetString(intPart+decPart, 10) if !ok { return dErr(s) @@ -360,6 +358,19 @@ func ParseDecimal128FromBigInt(bi *big.Int, exp int) (Decimal128, bool) { q := new(big.Int) r := new(big.Int) + // If the significand is zero, the logical value will always be zero, independent of the + // exponent. However, the loops for handling out-of-range exponent values below may be extremely + // slow for zero values because the significand never changes. Limit the exponent value to the + // supported range here to prevent entering the loops below. 
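The zero-significand clamp that the comment above introduces (and the hunk below implements) is easy to see in isolation: 0 times 10^e is 0 for every e, so the exponent can be snapped straight into the representable range instead of being walked there one division at a time. A simplified sketch; the bound values mirror the package's MinDecimal128Exp and MaxDecimal128Exp but are hard-coded here:

	package main

	import "fmt"

	const (
		maxDecimal128Exp = 6111
		minDecimal128Exp = -6176
	)

	// clampZeroExp snaps the exponent into the representable range when the
	// significand is zero, since every exponent then encodes the same value.
	func clampZeroExp(significandIsZero bool, exp int) int {
		if significandIsZero {
			if exp > maxDecimal128Exp {
				exp = maxDecimal128Exp
			}
			if exp < minDecimal128Exp {
				exp = minDecimal128Exp
			}
		}
		return exp
	}

	func main() {
		fmt.Println(clampZeroExp(true, 1_000_000)) // 6111
	}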
+ if bi.Cmp(zero) == 0 { + if exp > MaxDecimal128Exp { + exp = MaxDecimal128Exp + } + if exp < MinDecimal128Exp { + exp = MinDecimal128Exp + } + } + for bigIntCmpAbs(bi, maxS) == 1 { bi, _ = q.QuoRem(bi, ten, r) if r.Cmp(zero) != 0 { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index ded367316..c130e3ff1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -82,18 +82,18 @@ func ObjectIDFromHex(s string) (ObjectID, error) { return NilObjectID, ErrInvalidHex } - b, err := hex.DecodeString(s) + var oid [12]byte + _, err := hex.Decode(oid[:], []byte(s)) if err != nil { return NilObjectID, err } - var oid [12]byte - copy(oid[:], b) - return oid, nil } // IsValidObjectID returns true if the provided hex string represents a valid ObjectID and false if not. +// +// Deprecated: Use ObjectIDFromHex and check the error instead. func IsValidObjectID(s string) bool { _, err := ObjectIDFromHex(s) return err == nil @@ -183,7 +183,7 @@ func processUniqueBytes() [5]byte { var b [5]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { - panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err)) + panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err)) } return b @@ -193,7 +193,7 @@ func readRandomUint32() uint32 { var b [4]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { - panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err)) + panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err)) } return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index c72ccc1c4..65f4fbb94 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -45,7 +45,7 @@ var _ json.Unmarshaler = (*DateTime)(nil) // MarshalJSON marshal to time type. func (d DateTime) MarshalJSON() ([]byte, error) { - return json.Marshal(d.Time()) + return json.Marshal(d.Time().UTC()) } // UnmarshalJSON creates a primitive.DateTime from a JSON string. @@ -141,6 +141,16 @@ type Timestamp struct { I uint32 } +// After reports whether the time instant tp is after tp2. +func (tp Timestamp) After(tp2 Timestamp) bool { + return tp.T > tp2.T || (tp.T == tp2.T && tp.I > tp2.I) +} + +// Before reports whether the time instant tp is before tp2. +func (tp Timestamp) Before(tp2 Timestamp) bool { + return tp.T < tp2.T || (tp.T == tp2.T && tp.I < tp2.I) +} + // Equal compares tp to tp2 and returns true if they are equal. func (tp Timestamp) Equal(tp2 Timestamp) bool { return tp.T == tp2.T && tp.I == tp2.I @@ -151,24 +161,25 @@ func (tp Timestamp) IsZero() bool { return tp.T == 0 && tp.I == 0 } -// CompareTimestamp returns an integer comparing two Timestamps, where T is compared first, followed by I. -// Returns 0 if tp = tp2, 1 if tp > tp2, -1 if tp < tp2. -func CompareTimestamp(tp, tp2 Timestamp) int { - if tp.Equal(tp2) { +// Compare compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after +// tp2, it returns +1; if they're the same, it returns 0. 
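A quick usage sketch for the Timestamp ordering methods added above (After, Before, and the Compare method whose body follows below); ordering is by T first, then by I:

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson/primitive"
	)

	func main() {
		a := primitive.Timestamp{T: 1, I: 2}
		b := primitive.Timestamp{T: 1, I: 3}

		fmt.Println(a.Before(b)) // true: equal T, smaller I
		fmt.Println(b.After(a))  // true
		fmt.Println(a.Compare(b), b.Compare(a), a.Compare(a)) // -1 1 0
	}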
+func (tp Timestamp) Compare(tp2 Timestamp) int { + switch { + case tp.Equal(tp2): return 0 - } - - if tp.T > tp2.T { - return 1 - } - if tp.T < tp2.T { + case tp.Before(tp2): return -1 + default: + return +1 } - // Compare I values because T values are equal - if tp.I > tp2.I { - return 1 - } - return -1 +} + +// CompareTimestamp compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after +// tp2, it returns +1; if they're the same, it returns 0. +// +// Deprecated: Use Timestamp.Compare instead. +func CompareTimestamp(tp, tp2 Timestamp) int { + return tp.Compare(tp2) } // MinKey represents the BSON minkey value. @@ -186,6 +197,9 @@ type MaxKey struct{} type D []E // Map creates a map from the elements of the D. +// +// Deprecated: Converting directly from a D to an M will not be supported in Go Driver 2.0. Instead, +// users should marshal the D to BSON using bson.Marshal and unmarshal it to M using bson.Unmarshal. func (d D) Map() M { m := make(M, len(d)) for _, e := range d { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go index 1cbe3884d..ff32a87a7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go @@ -8,6 +8,7 @@ package bson import ( "errors" + "fmt" "reflect" "go.mongodb.org/mongo-driver/bson/bsoncodec" @@ -21,10 +22,16 @@ var primitiveCodecs PrimitiveCodecs // PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types // defined in this package. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. type PrimitiveCodecs struct{} // RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs // with the provided RegistryBuilder. if rb is nil, a new empty RegistryBuilder will be created. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) { if rb == nil { panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil")) @@ -38,18 +45,35 @@ func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) } // RawValueEncodeValue is the ValueEncoderFunc for RawValue. -func (PrimitiveCodecs) RawValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// If the RawValue's Type is "invalid" and the RawValue's Value is not empty or +// nil, then this method will return an error. +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive +// encoders and decoders registered. +func (PrimitiveCodecs) RawValueEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRawValue { - return bsoncodec.ValueEncoderError{Name: "RawValueEncodeValue", Types: []reflect.Type{tRawValue}, Received: val} + return bsoncodec.ValueEncoderError{ + Name: "RawValueEncodeValue", + Types: []reflect.Type{tRawValue}, + Received: val, + } } rawvalue := val.Interface().(RawValue) + if !rawvalue.Type.IsValid() { + return fmt.Errorf("the RawValue Type specifies an invalid BSON type: %#x", byte(rawvalue.Type)) + } + return bsonrw.Copier{}.CopyValueFromBytes(vw, rawvalue.Type, rawvalue.Value) } // RawValueDecodeValue is the ValueDecoderFunc for RawValue. 
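Since D.Map is deprecated in the hunk above, the suggested replacement is a round trip through Marshal and Unmarshal. A sketch:

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	func main() {
		d := bson.D{{Key: "x", Value: int32(1)}, {Key: "y", Value: "z"}}

		// Instead of d.Map(), round-trip through BSON bytes.
		data, err := bson.Marshal(d)
		if err != nil {
			panic(err)
		}
		var m bson.M
		if err := bson.Unmarshal(data, &m); err != nil {
			panic(err)
		}
		fmt.Println(m) // map[x:1 y:z]
	}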
-func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawValueDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRawValue { return bsoncodec.ValueDecoderError{Name: "RawValueDecodeValue", Types: []reflect.Type{tRawValue}, Received: val} } @@ -64,7 +88,10 @@ func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw } // RawEncodeValue is the ValueEncoderFunc for Reader. -func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { if !val.IsValid() || val.Type() != tRaw { return bsoncodec.ValueEncoderError{Name: "RawEncodeValue", Types: []reflect.Type{tRaw}, Received: val} } @@ -75,7 +102,10 @@ func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.Valu } // RawDecodeValue is the ValueDecoderFunc for Reader. -func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { +// +// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders +// registered. +func (PrimitiveCodecs) RawDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if !val.CanSet() || val.Type() != tRaw { return bsoncodec.ValueDecoderError{Name: "RawDecodeValue", Types: []reflect.Type{tRaw}, Received: val} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw.go b/vendor/go.mongodb.org/mongo-driver/bson/raw.go index efd705daa..130da61ba 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw.go @@ -16,18 +16,27 @@ import ( // ErrNilReader indicates that an operation was attempted on a nil bson.Reader. var ErrNilReader = errors.New("nil reader") -// Raw is a wrapper around a byte slice. It will interpret the slice as a -// BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the -// methods on this type and associated types come from the bsoncore package. +// Raw is a raw encoded BSON document. It can be used to delay BSON document decoding or precompute +// a BSON encoded document. +// +// A Raw must be a full BSON document. Use the RawValue type for individual BSON values. type Raw []byte -// NewFromIOReader reads in a document from the given io.Reader and constructs a Raw from -// it. -func NewFromIOReader(r io.Reader) (Raw, error) { +// ReadDocument reads a BSON document from the io.Reader and returns it as a bson.Raw. If the +// reader contains multiple BSON documents, only the first document is read. +func ReadDocument(r io.Reader) (Raw, error) { doc, err := bsoncore.NewDocumentFromReader(r) return Raw(doc), err } +// NewFromIOReader reads a BSON document from the io.Reader and returns it as a bson.Raw. If the +// reader contains multiple BSON documents, only the first document is read. +// +// Deprecated: Use ReadDocument instead. +func NewFromIOReader(r io.Reader) (Raw, error) { + return ReadDocument(r) +} + // Validate validates the document. 
This method only validates the first document in // the slice, to validate other documents, the slice must be resliced. func (r Raw) Validate() (err error) { return bsoncore.Document(r).Validate() } @@ -51,12 +60,19 @@ func (r Raw) LookupErr(key ...string) (RawValue, error) { // elements. If the document is not valid, the elements up to the invalid point will be returned // along with an error. func (r Raw) Elements() ([]RawElement, error) { - elems, err := bsoncore.Document(r).Elements() + doc := bsoncore.Document(r) + if len(doc) == 0 { + return nil, nil + } + elems, err := doc.Elements() + if err != nil { + return nil, err + } relems := make([]RawElement, 0, len(elems)) for _, elem := range elems { relems = append(relems, RawElement(elem)) } - return relems, err + return relems, nil } // Values returns this document as a slice of values. The returned slice will contain valid values. @@ -81,5 +97,5 @@ func (r Raw) IndexErr(index uint) (RawElement, error) { return RawElement(elem), err } -// String implements the fmt.Stringer interface. +// String returns the BSON document encoded as Extended JSON. func (r Raw) String() string { return bsoncore.Document(r).String() } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go index 006f503a3..8ce13c2cc 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_element.go @@ -10,10 +10,7 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) -// RawElement represents a BSON element in byte form. This type provides a simple way to -// transform a slice of bytes into a BSON element and extract information from it. -// -// RawElement is a thin wrapper around a bsoncore.Element. +// RawElement is a raw encoded BSON document or array element. type RawElement []byte // Key returns the key for this element. If the element is not valid, this method returns an empty @@ -36,7 +33,7 @@ func (re RawElement) ValueErr() (RawValue, error) { // Validate ensures re is a valid BSON element. func (re RawElement) Validate() error { return bsoncore.Element(re).Validate() } -// String implements the fmt.Stringer interface. The output will be in extended JSON format. +// String returns the BSON element encoded as Extended JSON. func (re RawElement) String() string { doc := bsoncore.BuildDocument(nil, re) j, err := MarshalExtJSON(Raw(doc), true, false) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go index 75297f30f..4d1bfb316 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go @@ -26,11 +26,10 @@ var ErrNilContext = errors.New("DecodeContext cannot be nil") // ErrNilRegistry is returned when the provided registry is nil. var ErrNilRegistry = errors.New("Registry cannot be nil") -// RawValue represents a BSON value in byte form. It can be used to hold unprocessed BSON or to -// defer processing of BSON. Type is the BSON type of the value and Value are the raw bytes that -// represent the element. +// RawValue is a raw encoded BSON value. It can be used to delay BSON value decoding or precompute a +// BSON encoded value. Type is the BSON type of the value and Value is the raw encoded BSON value. // -// This type wraps bsoncore.Value for most of it's functionality. +// A RawValue must be an individual BSON value. Use the Raw type for full BSON documents.
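A brief usage sketch for ReadDocument, the new name introduced above for NewFromIOReader, together with Raw.String's Extended JSON output:

	package main

	import (
		"bytes"
		"fmt"

		"go.mongodb.org/mongo-driver/bson"
	)

	func main() {
		data, err := bson.Marshal(bson.M{"hello": "world"})
		if err != nil {
			panic(err)
		}

		// ReadDocument reads the first BSON document from the reader;
		// NewFromIOReader is now a deprecated alias for it.
		raw, err := bson.ReadDocument(bytes.NewReader(data))
		if err != nil {
			panic(err)
		}
		fmt.Println(raw.String()) // Extended JSON form of the document
	}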
type RawValue struct { Type bsontype.Type Value []byte @@ -38,6 +37,12 @@ type RawValue struct { r *bsoncodec.Registry } +// IsZero reports whether the RawValue is zero, i.e. no data is present on +// the RawValue. It returns true if Type is 0 and Value is empty or nil. +func (rv RawValue) IsZero() bool { + return rv.Type == 0x00 && len(rv.Value) == 0 +} + // Unmarshal deserializes BSON into the provided val. If RawValue cannot be unmarshaled into val, an // error is returned. This method will use the registry used to create the RawValue, if the RawValue // was created from partial BSON processing, or it will use the default registry. Users wishing to @@ -268,10 +273,16 @@ func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32 // AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method // will panic. +// +// Deprecated: Use AsInt64 instead. If an int32 is required, convert the returned value to an int32 +// and perform any required overflow/underflow checking. func (rv RawValue) AsInt32() int32 { return convertToCoreValue(rv).AsInt32() } // AsInt32OK is the same as AsInt32, except that it returns a boolean instead of // panicking. +// +// Deprecated: Use AsInt64OK instead. If an int32 is required, convert the returned value to an +// int32 and perform any required overflow/underflow checking. func (rv RawValue) AsInt32OK() (int32, bool) { return convertToCoreValue(rv).AsInt32OK() } // Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index 16d7573e7..b5b0f3568 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -6,15 +6,19 @@ package bson -import "go.mongodb.org/mongo-driver/bson/bsoncodec" +import ( + "go.mongodb.org/mongo-driver/bson/bsoncodec" +) // DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the // primitive codecs. -var DefaultRegistry = NewRegistryBuilder().Build() +var DefaultRegistry = NewRegistry() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and // decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. +// +// Deprecated: Use NewRegistry instead. func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb) @@ -22,3 +26,10 @@ func NewRegistryBuilder() *bsoncodec.RegistryBuilder { primitiveCodecs.RegisterPrimitiveCodecs(rb) return rb } + +// NewRegistry creates a new Registry configured with the default encoders and decoders from the +// bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the PrimitiveCodecs +// type in this package. +func NewRegistry() *bsoncodec.Registry { + return NewRegistryBuilder().Build() +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/types.go b/vendor/go.mongodb.org/mongo-driver/bson/types.go index 13a1c35cf..ef3981246 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/types.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/types.go @@ -10,7 +10,7 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// These constants uniquely refer to each BSON type. +// BSON element types as described in https://bsonspec.org/spec.html. 
const ( TypeDouble = bsontype.Double TypeString = bsontype.String @@ -34,3 +34,17 @@ const ( TypeMinKey = bsontype.MinKey TypeMaxKey = bsontype.MaxKey ) + +// BSON binary element subtypes as described in https://bsonspec.org/spec.html. +const ( + TypeBinaryGeneric = bsontype.BinaryGeneric + TypeBinaryFunction = bsontype.BinaryFunction + TypeBinaryBinaryOld = bsontype.BinaryBinaryOld + TypeBinaryUUIDOld = bsontype.BinaryUUIDOld + TypeBinaryUUID = bsontype.BinaryUUID + TypeBinaryMD5 = bsontype.BinaryMD5 + TypeBinaryEncrypted = bsontype.BinaryEncrypted + TypeBinaryColumn = bsontype.BinaryColumn + TypeBinarySensitive = bsontype.BinarySensitive + TypeBinaryUserDefined = bsontype.BinaryUserDefined +) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go index f936ba183..66da17ee0 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go @@ -14,18 +14,26 @@ import ( "go.mongodb.org/mongo-driver/bson/bsontype" ) -// Unmarshaler is an interface implemented by types that can unmarshal a BSON -// document representation of themselves. The BSON bytes can be assumed to be -// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data -// after returning. +// Unmarshaler is the interface implemented by types that can unmarshal a BSON +// document representation of themselves. The input can be assumed to be a valid +// encoding of a BSON document. UnmarshalBSON must copy the BSON data if it +// wishes to retain the data after returning. +// +// Unmarshaler is only used to unmarshal full BSON documents. To create custom +// BSON unmarshaling behavior for individual values in a BSON document, +// implement the ValueUnmarshaler interface instead. type Unmarshaler interface { UnmarshalBSON([]byte) error } -// ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. +// ValueUnmarshaler is the interface implemented by types that can unmarshal a +// BSON value representation of themselves. The input can be assumed to be a +// valid encoding of a BSON value. UnmarshalBSONValue must copy the BSON value +// bytes if it wishes to retain the data after returning. +// +// ValueUnmarshaler is only used to unmarshal individual values in a BSON +// document. To create custom BSON unmarshaling behavior for an entire BSON +// document, implement the Unmarshaler interface instead. type ValueUnmarshaler interface { UnmarshalBSONValue(bsontype.Type, []byte) error } @@ -40,6 +48,16 @@ func Unmarshal(data []byte, val interface{}) error { // UnmarshalWithRegistry parses the BSON-encoded data using Registry r and // stores the result in the value pointed to by val. If val is nil or not // a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. +// +// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Decoder] for more examples.
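The same Decoder-based replacement shows up in every deprecation notice in this file; roughly, assuming a *bsoncodec.Registry named reg built elsewhere:

```go
import (
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// decodeWithRegistry mirrors the pattern from the deprecation notices;
// it is a hypothetical helper, not part of the driver.
func decodeWithRegistry(reg *bsoncodec.Registry, data []byte, val interface{}) error {
	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
	if err != nil {
		return err
	}
	if err := dec.SetRegistry(reg); err != nil {
		return err
	}
	return dec.Decode(val)
}
```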
func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) error { vr := bsonrw.NewBSONDocumentReader(data) return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val) @@ -48,11 +66,40 @@ func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) // UnmarshalWithContext parses the BSON-encoded data using DecodeContext dc and // stores the result in the value pointed to by val. If val is nil or not // a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. +// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal +// behavior instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.DefaultDocumentM() +// +// See [Decoder] for more examples. func UnmarshalWithContext(dc bsoncodec.DecodeContext, data []byte, val interface{}) error { vr := bsonrw.NewBSONDocumentReader(data) return unmarshalFromReader(dc, vr, val) } +// UnmarshalValue parses the BSON value of type t with bson.DefaultRegistry and +// stores the result in the value pointed to by val. If val is nil or not a pointer, +// UnmarshalValue returns an error. +func UnmarshalValue(t bsontype.Type, data []byte, val interface{}) error { + return UnmarshalValueWithRegistry(DefaultRegistry, t, data, val) +} + +// UnmarshalValueWithRegistry parses the BSON value of type t with registry r and +// stores the result in the value pointed to by val. If val is nil or not a pointer, +// UnmarshalValue returns an error. +// +// Deprecated: Using a custom registry to unmarshal individual BSON values will not be supported in +// Go Driver 2.0. +func UnmarshalValueWithRegistry(r *bsoncodec.Registry, t bsontype.Type, data []byte, val interface{}) error { + vr := bsonrw.NewBSONValueReader(t, data) + return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val) +} + // UnmarshalExtJSON parses the extended JSON-encoded data and stores the result // in the value pointed to by val. If val is nil or not a pointer, Unmarshal // returns InvalidUnmarshalError. @@ -63,6 +110,20 @@ func UnmarshalExtJSON(data []byte, canonical bool, val interface{}) error { // UnmarshalExtJSONWithRegistry parses the extended JSON-encoded data using // Registry r and stores the result in the value pointed to by val. If val is // nil or not a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. +// +// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead: +// +// vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true) +// if err != nil { +// panic(err) +// } +// dec, err := bson.NewDecoder(vr) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Decoder] for more examples. func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical bool, val interface{}) error { ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical) if err != nil { @@ -75,6 +136,21 @@ func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical // UnmarshalExtJSONWithContext parses the extended JSON-encoded data using // DecodeContext dc and stores the result in the value pointed to by val. If val is // nil or not a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError. 
+// +// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal +// behavior instead: +// +// vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true) +// if err != nil { +// panic(err) +// } +// dec, err := bson.NewDecoder(vr) +// if err != nil { +// panic(err) +// } +// dec.DefaultDocumentM() +// +// See [Decoder] for more examples. func UnmarshalExtJSONWithContext(dc bsoncodec.DecodeContext, data []byte, canonical bool, val interface{}) error { ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical) if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go index 8ea60ba3c..6bc0afa70 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go @@ -7,10 +7,10 @@ package bsoncore import ( - "bytes" "fmt" "io" "strconv" + "strings" ) // NewArrayLengthError creates and returns an error for when the length of an array exceeds the @@ -53,7 +53,7 @@ func (a Array) DebugString() string { if len(a) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteString("Array") length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length buf.WriteByte('(') @@ -69,7 +69,7 @@ func (a Array) DebugString() string { buf.WriteString(fmt.Sprintf("", length)) break } - fmt.Fprintf(&buf, "%s", elem.Value().DebugString()) + buf.WriteString(elem.Value().DebugString()) if length != 1 { buf.WriteByte(',') } @@ -85,7 +85,7 @@ func (a Array) String() string { if len(a) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteByte('[') length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length @@ -100,7 +100,7 @@ func (a Array) String() string { if !ok { return "" } - fmt.Fprintf(&buf, "%s", elem.Value().String()) + buf.WriteString(elem.Value().String()) if length > 1 { buf.WriteByte(',') } diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index 17aad6d71..88133293e 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -4,25 +4,6 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package bsoncore contains functions that can be used to encode and decode BSON -// elements and values to or from a slice of bytes. These functions are aimed at -// allowing low level manipulation of BSON and can be used to build a higher -// level BSON library. -// -// The Read* functions within this package return the values of the element and -// a boolean indicating if the values are valid. A boolean was used instead of -// an error because any error that would be returned would be the same: not -// enough bytes. This library attempts to do no validation, it will only return -// false if there are not enough bytes for an item to be read. For example, the -// ReadDocument function checks the length, if that length is larger than the -// number of bytes available, it will return false, if there are enough bytes, it -// will return those bytes and true. It is the consumers responsibility to -// validate those bytes. -// -// The Append* functions within this package will append the type value to the -// given dst slice. 
If the slice has enough capacity, it will not grow the -// slice. The Append*Element functions within this package operate in the same -// way, but additionally append the BSON type and the key before the value. package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" import ( @@ -254,7 +235,7 @@ func BuildDocumentValue(elems ...[]byte) Value { return Value{Type: bsontype.EmbeddedDocument, Data: BuildDocument(nil, elems...)} } -// BuildDocumentElement will append a BSON embedded document elemnt using key and the provided +// BuildDocumentElement will append a BSON embedded document element using key and the provided // elements and return the extended buffer. func BuildDocumentElement(dst []byte, key string, elems ...[]byte) []byte { return BuildDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), elems...) } @@ -844,6 +825,9 @@ func readLengthBytes(src []byte) ([]byte, []byte, bool) { if !ok { return nil, src, false } + if l < 4 { + return nil, src, false + } if len(src) < int(l) { return nil, src, false } diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go new file mode 100644 index 000000000..6837b53fc --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go @@ -0,0 +1,29 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsoncore contains functions that can be used to encode and decode BSON +// elements and values to or from a slice of bytes. These functions are aimed at +// allowing low level manipulation of BSON and can be used to build a higher +// level BSON library. +// +// The Read* functions within this package return the values of the element and +// a boolean indicating if the values are valid. A boolean was used instead of +// an error because any error that would be returned would be the same: not +// enough bytes. This library attempts to do no validation; it will only return +// false if there are not enough bytes for an item to be read. For example, the +// ReadDocument function checks the length; if that length is larger than the +// number of bytes available, it will return false; if there are enough bytes, it +// will return those bytes and true. It is the consumer's responsibility to +// validate those bytes. +// +// The Append* functions within this package will append the type value to the +// given dst slice. If the slice has enough capacity, it will not grow the +// slice. The Append*Element functions within this package operate in the same +// way, but additionally append the BSON type and the key before the value. +// +// Warning: Package bsoncore is unstable and there is no backward compatibility +// guarantee. It is experimental and subject to change.
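The new l < 4 guard in readLengthBytes above closes a small parsing hole: a BSON length prefix counts its own four bytes, so any smaller value frames an impossible object. A standalone sketch of the rule (not the driver's exact code):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frame splits one length-prefixed BSON frame from src. The prefix is a
// little-endian int32 that includes its own 4 bytes, so l < 4 is never valid.
func frame(src []byte) (msg, rem []byte, ok bool) {
	if len(src) < 4 {
		return nil, src, false
	}
	l := int32(binary.LittleEndian.Uint32(src))
	if l < 4 || int64(len(src)) < int64(l) {
		return nil, src, false
	}
	return src[:l], src[l:], true
}

func main() {
	// A 2-byte "length" used to produce a truncated frame; now it is rejected.
	_, _, ok := frame([]byte{0x02, 0x00, 0x00, 0x00, 0xFF})
	fmt.Println(ok) // false
}
```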
+package bsoncore diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go index d6e4bb069..3f360f1ae 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go @@ -7,11 +7,11 @@ package bsoncore import ( - "bytes" "errors" "fmt" "io" "strconv" + "strings" "go.mongodb.org/mongo-driver/bson/bsontype" ) @@ -237,7 +237,7 @@ func (d Document) DebugString() string { if len(d) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteString("Document") length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length buf.WriteByte('(') @@ -253,7 +253,7 @@ func (d Document) DebugString() string { buf.WriteString(fmt.Sprintf("", length)) break } - fmt.Fprintf(&buf, "%s ", elem.DebugString()) + buf.WriteString(elem.DebugString()) } buf.WriteByte('}') @@ -266,7 +266,7 @@ func (d Document) String() string { if len(d) < 5 { return "" } - var buf bytes.Buffer + var buf strings.Builder buf.WriteByte('{') length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length @@ -285,7 +285,7 @@ func (d Document) String() string { if !ok { return "" } - fmt.Fprintf(&buf, "%s", elem.String()) + buf.WriteString(elem.String()) first = false } buf.WriteByte('}') diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go index 3acb4222b..1fe0897c9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go @@ -129,7 +129,7 @@ func (e Element) String() string { if !valid { return "" } - return fmt.Sprintf(`"%s": %v`, key, val) + return "\"" + string(key) + "\": " + val.String() } // DebugString outputs a human readable version of RawElement. It will attempt to stringify the diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go index 789d2b982..69c1f9edb 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go @@ -59,8 +59,6 @@ func (v Value) IsNumber() bool { // AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method // will panic. -// -// TODO(skriptble): Add support for Decimal128. func (v Value) AsInt32() int32 { if !v.IsNumber() { panic(ElementTypeError{"bsoncore.Value.AsInt32", v.Type}) @@ -93,8 +91,6 @@ func (v Value) AsInt32() int32 { // AsInt32OK functions the same as AsInt32 but returns a boolean instead of panicking. False // indicates an error. -// -// TODO(skriptble): Add support for Decimal128. func (v Value) AsInt32OK() (int32, bool) { if !v.IsNumber() { return 0, false @@ -127,8 +123,6 @@ func (v Value) AsInt32OK() (int32, bool) { // AsInt64 returns a BSON number as an int64. If the BSON type is not a numeric one, this method // will panic. -// -// TODO(skriptble): Add support for Decimal128. func (v Value) AsInt64() int64 { if !v.IsNumber() { panic(ElementTypeError{"bsoncore.Value.AsInt64", v.Type}) @@ -162,8 +156,6 @@ func (v Value) AsInt64() int64 { // AsInt64OK functions the same as AsInt64 but returns a boolean instead of panicking. False // indicates an error. -// -// TODO(skriptble): Add support for Decimal128. 
func (v Value) AsInt64OK() (int64, bool) { if !v.IsNumber() { return 0, false @@ -198,21 +190,14 @@ func (v Value) AsInt64OK() (int64, bool) { // AsFloat64 returns a BSON number as an float64. If the BSON type is not a numeric one, this method // will panic. // -// TODO(skriptble): Add support for Decimal128. -func (v Value) AsFloat64() float64 { return 0 } +// TODO(GODRIVER-2751): Implement AsFloat64. +// func (v Value) AsFloat64() float64 // AsFloat64OK functions the same as AsFloat64 but returns a boolean instead of panicking. False // indicates an error. // -// TODO(skriptble): Add support for Decimal128. -func (v Value) AsFloat64OK() (float64, bool) { return 0, false } - -// Add will add this value to another. This is currently only implemented for strings and numbers. -// If either value is a string, the other type is coerced into a string and added to the other. -// -// This method will alter v and will attempt to reuse the []byte of v. If the []byte is too small, -// it will be expanded. -func (v *Value) Add(v2 Value) error { return nil } +// TODO(GODRIVER-2751): Implement AsFloat64OK. +// func (v Value) AsFloat64OK() (float64, bool) // Equal compaes v to v2 and returns true if they are equal. func (v Value) Equal(v2 Value) bool { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go new file mode 100644 index 000000000..92b8cf73c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// DefaultClient is the default Client and is used by Get, Head, Post and PostForm. +// Please be careful of initialization order - for example, if you change +// the global propagator, the DefaultClient might still be using the old one. +var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} + +// Get is a convenient replacement for http.Get that adds a span around the request. +func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Head is a convenient replacement for http.Head that adds a span around the request. +func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Post is a convenient replacement for http.Post that adds a span around the request. +func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return DefaultClient.Do(req) +} + +// PostForm is a convenient replacement for http.PostForm that adds a span around the request. +func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { + return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go new file mode 100644 index 000000000..303e5505e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Attribute keys that can be added to a span.
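These wrappers differ from their net/http counterparts only in taking a context, which is what carries the active span into DefaultClient's instrumented transport. Hypothetical usage:

```go
import (
	"context"
	"fmt"
	"io"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// fetch issues a traced GET; the span is a child of whatever span ctx carries.
func fetch(ctx context.Context, url string) error {
	resp, err := otelhttp.Get(ctx, url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	n, err := io.Copy(io.Discard, resp.Body)
	if err != nil {
		return err
	}
	fmt.Printf("fetched %d bytes from %s\n", n, url)
	return nil
}
```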
+const ( + ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read + ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded) + WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written + WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) +) + +// Server HTTP metrics. +const ( + RequestCount = "http.server.request_count" // Incoming request count total + RequestContentLength = "http.server.request_content_length" // Incoming request bytes total + ResponseContentLength = "http.server.response_content_length" // Outgoing response bytes total + ServerLatency = "http.server.duration" // Incoming end to end duration, milliseconds +) + +// Filter is a predicate used to determine whether a given http.Request should +// be traced. A Filter must return true if the request should be traced. +type Filter func(*http.Request) bool + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(Version())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go new file mode 100644 index 000000000..e4fa1b8d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -0,0 +1,208 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "net/http" + "net/http/httptrace" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +const ( + instrumentationName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +// config represents the configuration options available for the http.Handler +// and http.Transport types. +type config struct { + ServerName string + Tracer trace.Tracer + Meter metric.Meter + Propagators propagation.TextMapPropagator + SpanStartOptions []trace.SpanStartOption + PublicEndpoint bool + PublicEndpointFn func(*http.Request) bool + ReadEvent bool + WriteEvent bool + Filters []Filter + SpanNameFormatter func(string, *http.Request) string + ClientTrace func(context.Context) *httptrace.ClientTrace + + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider +} + +// Option interface used for setting optional config properties. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// newConfig creates a new config struct and applies opts to it.
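A Filter returning false excludes the request from tracing entirely. For example, to keep health probes out of traces, a predicate like the one below (path is illustrative) can be passed via WithFilter, defined further down in this file:

```go
import "net/http"

// healthzFilter satisfies otelhttp.Filter: trace everything except /healthz.
func healthzFilter(r *http.Request) bool {
	return r.URL.Path != "/healthz"
}
```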
+func newConfig(opts ...Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + MeterProvider: otel.GetMeterProvider(), + } + for _, opt := range opts { + opt.apply(c) + } + + // Tracer is only initialized if manually specified. Otherwise, it can be passed with the tracing context. + if c.TracerProvider != nil { + c.Tracer = newTracer(c.TracerProvider) + } + + c.Meter = c.MeterProvider.Meter( + instrumentationName, + metric.WithInstrumentationVersion(Version()), + ) + + return c +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. +func WithTracerProvider(provider trace.TracerProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.TracerProvider = provider + } + }) +} + +// WithMeterProvider specifies a meter provider to use for creating a meter. +// If none is specified, the global provider is used. +func WithMeterProvider(provider metric.MeterProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.MeterProvider = provider + } + }) +} + +// WithPublicEndpoint configures the Handler to link the span with an incoming +// span context. If this option is not provided, then the association is a child +// association instead of a link. +func WithPublicEndpoint() Option { + return optionFunc(func(c *config) { + c.PublicEndpoint = true + }) +} + +// WithPublicEndpointFn runs with every request, and allows conditionally +// configuring the Handler to link the span with an incoming span context. If +// this option is not provided or returns false, then the association is a +// child association instead of a link. +// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. +func WithPublicEndpointFn(fn func(*http.Request) bool) Option { + return optionFunc(func(c *config) { + c.PublicEndpointFn = fn + }) +} + +// WithPropagators configures specific propagators. If this +// option isn't specified, then the global TextMapPropagator is used. +func WithPropagators(ps propagation.TextMapPropagator) Option { + return optionFunc(func(c *config) { + if ps != nil { + c.Propagators = ps + } + }) +} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) Option { + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) +} + +// WithFilter adds a filter to the list of filters used by the handler. +// If any filter indicates to exclude a request then the request will not be +// traced. All filters must allow a request to be traced for a Span to be created. +// If no filters are provided then all requests are traced. +// Filters will be invoked for each processed request; it is advised to make them +// simple and fast. +func WithFilter(f Filter) Option { + return optionFunc(func(c *config) { + c.Filters = append(c.Filters, f) + }) +} + +type event int + +// Different types of events that can be recorded, see WithMessageEvents. +const ( + ReadEvents event = iota + WriteEvents +) + +// WithMessageEvents configures the Handler to record the specified events +// (span.AddEvent) on spans. By default only summary attributes are added at the +// end of the request.
+// +// Valid events are: +// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read +// using the ReadBytesKey +// - WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write +// using the WroteBytesKey +func WithMessageEvents(events ...event) Option { + return optionFunc(func(c *config) { + for _, e := range events { + switch e { + case ReadEvents: + c.ReadEvent = true + case WriteEvents: + c.WriteEvent = true + } + } + }) +} + +// WithSpanNameFormatter takes a function that will be called on every +// request and the returned string will become the Span Name. +func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { + return optionFunc(func(c *config) { + c.SpanNameFormatter = f + }) +} + +// WithClientTrace takes a function that returns a client trace instance that will be +// applied to the requests sent through the otelhttp Transport. +func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { + return optionFunc(func(c *config) { + c.ClientTrace = f + }) +} + +// WithServerName returns an Option that sets the name of the (virtual) server +// handling requests. +func WithServerName(server string) Option { + return optionFunc(func(c *config) { + c.ServerName = server + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go new file mode 100644 index 000000000..38c7f01c7 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otelhttp provides an http.Handler and functions that are intended +// to be used to add tracing by wrapping existing handlers (with Handler) and +// routes (with WithRouteTag). +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go new file mode 100644 index 000000000..b2fbe0784 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -0,0 +1,275 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
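Taken together, the options compose into a single slice. A sketch reusing the healthzFilter from above; the span-name scheme is illustrative, not part of this change:

```go
import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// exporterOptions bundles the handler options defined in this file.
func exporterOptions() []otelhttp.Option {
	return []otelhttp.Option{
		// Span names become e.g. "podman-exporter GET /metrics".
		otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
			return operation + " " + r.Method + " " + r.URL.Path
		}),
		otelhttp.WithFilter(healthzFilter),
		otelhttp.WithMessageEvents(otelhttp.ReadEvents, otelhttp.WriteEvents),
	}
}
```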
+ +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "io" + "net/http" + "time" + + "github.com/felixge/httpsnoop" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +// middleware is an http middleware which wraps the next handler in a span. +type middleware struct { + operation string + server string + + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + counters map[string]metric.Int64Counter + valueRecorders map[string]metric.Float64Histogram + publicEndpoint bool + publicEndpointFn func(*http.Request) bool +} + +func defaultHandlerFormatter(operation string, _ *http.Request) string { + return operation +} + +// NewHandler wraps the passed handler in a span named after the operation and +// enriches it with metrics. +func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { + return NewMiddleware(operation, opts...)(handler) +} + +// NewMiddleware returns a tracing and metrics instrumentation middleware. +// The handler returned by the middleware wraps a handler +// in a span named after the operation and enriches it with metrics. +func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { + h := middleware{ + operation: operation, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)), + WithSpanNameFormatter(defaultHandlerFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) + h.configure(c) + h.createMeasures() + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h.serveHTTP(w, r, next) + }) + } +} + +func (h *middleware) configure(c *config) { + h.tracer = c.Tracer + h.meter = c.Meter + h.propagators = c.Propagators + h.spanStartOptions = c.SpanStartOptions + h.readEvent = c.ReadEvent + h.writeEvent = c.WriteEvent + h.filters = c.Filters + h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn + h.server = c.ServerName +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} + +func (h *middleware) createMeasures() { + h.counters = make(map[string]metric.Int64Counter) + h.valueRecorders = make(map[string]metric.Float64Histogram) + + requestBytesCounter, err := h.meter.Int64Counter(RequestContentLength) + handleErr(err) + + responseBytesCounter, err := h.meter.Int64Counter(ResponseContentLength) + handleErr(err) + + serverLatencyMeasure, err := h.meter.Float64Histogram(ServerLatency) + handleErr(err) + + h.counters[RequestContentLength] = requestBytesCounter + h.counters[ResponseContentLength] = responseBytesCounter + h.valueRecorders[ServerLatency] = serverLatencyMeasure +} + +// serveHTTP sets up tracing and calls the given next http.Handler with the span +// context injected into the request context. 
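Wiring it up: NewHandler wraps any http.Handler, and each request then gets a server span plus the two byte counters and the latency histogram registered in createMeasures. A hypothetical main reusing exporterOptions from the sketch above (the handler is a stand-in; the port is only illustrative):

```go
import (
	"fmt"
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok") // stand-in for the real metrics handler
	})
	wrapped := otelhttp.NewHandler(mux, "podman-exporter", exporterOptions()...)
	log.Fatal(http.ListenAndServe(":9882", wrapped))
}
```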
+func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { + requestStartTime := time.Now() + for _, f := range h.filters { + if !f(r) { + // Simply pass through to the handler if a filter rejects the request + next.ServeHTTP(w, r) + return + } + } + + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + opts := []trace.SpanStartOption{ + trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), + } + if h.server != "" { + hostAttr := semconv.NetHostName(h.server) + opts = append(opts, trace.WithAttributes(hostAttr)) + } + opts = append(opts, h.spanStartOptions...) + if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. + if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } + + tracer := h.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) + defer span.End() + + readRecordFunc := func(int64) {} + if h.readEvent { + readRecordFunc = func(n int64) { + span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n))) + } + } + + var bw bodyWrapper + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + if r.Body != nil && r.Body != http.NoBody { + bw.ReadCloser = r.Body + bw.record = readRecordFunc + r.Body = &bw + } + + writeRecordFunc := func(int64) {} + if h.writeEvent { + writeRecordFunc = func(n int64) { + span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n))) + } + } + + rww := &respWriterWrapper{ + ResponseWriter: w, + record: writeRecordFunc, + ctx: ctx, + props: h.propagators, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } + + // Wrap w to use our ResponseWriter methods while also exposing + // other interfaces that w may implement (http.CloseNotifier, + // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom). + + w = httpsnoop.Wrap(w, httpsnoop.Hooks{ + Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc { + return rww.Header + }, + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return rww.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return rww.WriteHeader + }, + }) + + labeler := &Labeler{} + ctx = injectLabeler(ctx, labeler) + + next.ServeHTTP(w, r.WithContext(ctx)) + + setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) + + // Add metrics + attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) + if rww.statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) + } + o := metric.WithAttributes(attributes...) + h.counters[RequestContentLength].Add(ctx, bw.read, o) + h.counters[ResponseContentLength].Add(ctx, rww.written, o) + + // Use floating point division here for higher precision (instead of Millisecond method). 
+ elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o) +} + +func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { + attributes := []attribute.KeyValue{} + + // TODO: Consider adding an event after each read and write, possibly as an + // option (defaulting to off), so as to not create needlessly verbose spans. + if read > 0 { + attributes = append(attributes, ReadBytesKey.Int64(read)) + } + if rerr != nil && rerr != io.EOF { + attributes = append(attributes, ReadErrorKey.String(rerr.Error())) + } + if wrote > 0 { + attributes = append(attributes, WroteBytesKey.Int64(wrote)) + } + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) + + if werr != nil && werr != io.EOF { + attributes = append(attributes, WriteErrorKey.String(werr.Error())) + } + span.SetAttributes(attributes...) +} + +// WithRouteTag annotates spans and metrics with the provided route name +// with HTTP route attribute. +func WithRouteTag(route string, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + attr := semconv.HTTPRouteKey.String(route) + + span := trace.SpanFromContext(r.Context()) + span.SetAttributes(attr) + + labeler, _ := LabelerFromContext(r.Context()) + labeler.Add(attr) + + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go new file mode 100644 index 000000000..edf4ce3d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
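WithRouteTag is applied per route rather than per server, since the low-cardinality http.route value is only known where the mux is assembled. A sketch (route and handler names are illustrative):

```go
import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// register stamps the http.route attribute on the span and metric labels
// for this endpoint.
func register(mux *http.ServeMux, containers http.Handler) {
	mux.Handle("/containers", otelhttp.WithRouteTag("/containers", containers))
}
```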
+ +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +// Generate semconvutil package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go new file mode 100644 index 000000000..d3dede9eb --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -0,0 +1,552 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconvutil/httpconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +import ( + "fmt" + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// HTTPClientResponse returns trace attributes for an HTTP response received by a +// client from a server. It will return the following attributes if the related +// values are defined in resp: "http.status.code", +// "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event; +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. A complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) +func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { + return hc.ClientResponse(resp) +} + +// HTTPClientRequest returns trace attributes for an HTTP request made by a client. +// The following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". +func HTTPClientRequest(req *http.Request) []attribute.KeyValue { + return hc.ClientRequest(req) +} + +// HTTPClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func HTTPClientStatus(code int) (codes.Code, string) { + return hc.ClientStatus(code) +} + +// HTTPServerRequest returns trace attributes for an HTTP request received by a +// server.
+// +// The server must be the primary server name if it is known. For example, this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and, if a port is used to route to the +// server, that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". The following attributes are +// returned if their related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequest(server, req) +} + +// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example, this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and, if a port is used to route to the +// server, that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "net.host.name". The following attributes are +// returned if their related values are defined in req: "net.host.port". +func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequestMetrics(server, req) +} + +// HTTPServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func HTTPServerStatus(code int) (codes.Code, string) { + return hc.ServerStatus(code) +} + +// HTTPRequestHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// capture and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed.
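The server/client split matters mostly for status mapping: HTTPServerStatus never marks 4xx as an error, because on the server side those are the caller's fault. A sketch of the rule as documented (the real implementation lives in this internal package and may differ in detail):

```go
import (
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

// serverStatus maps an HTTP status code to a span status the way the
// HTTPServerStatus doc describes: only 5xx and out-of-range codes are errors.
func serverStatus(code int) (codes.Code, string) {
	if code < 100 || code >= 600 {
		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
	}
	if code >= 500 {
		return codes.Error, ""
	}
	return codes.Unset, ""
}
```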
+
+// HTTPRequestHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func HTTPRequestHeader(h http.Header) []attribute.KeyValue {
+ return hc.RequestHeader(h)
+}
+
+// HTTPResponseHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func HTTPResponseHeader(h http.Header) []attribute.KeyValue {
+ return hc.ResponseHeader(h)
+}
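Both header helpers assume the caller passes a pre-filtered header set rather than the full request or response headers. A sketch of the explicit allowlist the comments above call for; the helper name and the chosen headers are hypothetical, and the imports match the sketches above:

```go
func allowedHeaderAttrs(h http.Header, allow ...string) []attribute.KeyValue {
	// Copy only the explicitly allowed headers before handing them over.
	filtered := make(http.Header, len(allow))
	for _, name := range allow {
		canonical := http.CanonicalHeaderKey(name)
		if values, ok := h[canonical]; ok {
			filtered[canonical] = values
		}
	}
	return semconvutil.HTTPRequestHeader(filtered)
}
```

A caller would then record, for example, `allowedHeaderAttrs(req.Header, "Content-Type", "Accept")` on the span.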
+
+// httpConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type httpConv struct {
+ NetConv *netConv
+
+ EnduserIDKey attribute.Key
+ HTTPClientIPKey attribute.Key
+ HTTPFlavorKey attribute.Key
+ HTTPMethodKey attribute.Key
+ HTTPRequestContentLengthKey attribute.Key
+ HTTPResponseContentLengthKey attribute.Key
+ HTTPRouteKey attribute.Key
+ HTTPSchemeHTTP attribute.KeyValue
+ HTTPSchemeHTTPS attribute.KeyValue
+ HTTPStatusCodeKey attribute.Key
+ HTTPTargetKey attribute.Key
+ HTTPURLKey attribute.Key
+ HTTPUserAgentKey attribute.Key
+}
+
+var hc = &httpConv{
+ NetConv: nc,
+
+ EnduserIDKey: semconv.EnduserIDKey,
+ HTTPClientIPKey: semconv.HTTPClientIPKey,
+ HTTPFlavorKey: semconv.HTTPFlavorKey,
+ HTTPMethodKey: semconv.HTTPMethodKey,
+ HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
+ HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+ HTTPRouteKey: semconv.HTTPRouteKey,
+ HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
+ HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
+ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
+ HTTPTargetKey: semconv.HTTPTargetKey,
+ HTTPURLKey: semconv.HTTPURLKey,
+ HTTPUserAgentKey: semconv.HTTPUserAgentKey,
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event,
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+// append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+ var n int
+ if resp.StatusCode > 0 {
+ n++
+ }
+ if resp.ContentLength > 0 {
+ n++
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ if resp.StatusCode > 0 {
+ attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+ }
+ if resp.ContentLength > 0 {
+ attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+ }
+ return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+ n := 4 // URL, peer name, proto, and method.
+ var h string
+ if req.URL != nil {
+ h = req.URL.Host
+ }
+ peer, p := firstHostPort(h, req.Header.Get("Host"))
+ port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+ if port > 0 {
+ n++
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+ if req.ContentLength > 0 {
+ n++
+ }
+ userID, _, hasUserID := req.BasicAuth()
+ if hasUserID {
+ n++
+ }
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+ attrs = append(attrs, c.flavor(req.Proto))
+
+ var u string
+ if req.URL != nil {
+ // Remove any username/password info that may be in the URL.
+ userinfo := req.URL.User
+ req.URL.User = nil
+ u = req.URL.String()
+ // Restore any username/password info that was removed.
+ req.URL.User = userinfo
+ }
+ attrs = append(attrs, c.HTTPURLKey.String(u))
+
+ attrs = append(attrs, c.NetConv.PeerName(peer))
+ if port > 0 {
+ attrs = append(attrs, c.NetConv.PeerPort(port))
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+ }
+
+ if l := req.ContentLength; l > 0 {
+ attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+ }
+
+ if hasUserID {
+ attrs = append(attrs, c.EnduserIDKey.String(userID))
+ }
+
+ return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and, if a port is used to route to the
+// server, that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+ // TODO: This currently does not add the specification required
+ // `http.target` attribute. It has too high of a cardinality to safely be
+ // added. An alternate should be added, or this comment removed, when it is
+ // addressed by the specification. If it is ultimately decided to continue
+ // not including the attribute, the HTTPTargetKey field of the httpConv
+ // should be removed as well.
+
+ n := 4 // Method, scheme, proto, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server)
+ if p < 0 {
+ _, p = splitHostPort(req.Host)
+ }
+ }
+ hostPort := requiredHTTPPort(req.TLS != nil, p)
+ if hostPort > 0 {
+ n++
+ }
+ peer, peerPort := splitHostPort(req.RemoteAddr)
+ if peer != "" {
+ n++
+ if peerPort > 0 {
+ n++
+ }
+ }
+ useragent := req.UserAgent()
+ if useragent != "" {
+ n++
+ }
+ userID, _, hasUserID := req.BasicAuth()
+ if hasUserID {
+ n++
+ }
+ clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+ if clientIP != "" {
+ n++
+ }
+ attrs := make([]attribute.KeyValue, 0, n)
+
+ attrs = append(attrs, c.method(req.Method))
+ attrs = append(attrs, c.scheme(req.TLS != nil))
+ attrs = append(attrs, c.flavor(req.Proto))
+ attrs = append(attrs, c.NetConv.HostName(host))
+
+ if hostPort > 0 {
+ attrs = append(attrs, c.NetConv.HostPort(hostPort))
+ }
+
+ if peer != "" {
+ // The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a
+ // file-path that would be interpreted with a sock family.
+ attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+ if peerPort > 0 {
+ attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+ }
+ }
+
+ if useragent != "" {
+ attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+ }
+
+ if hasUserID {
+ attrs = append(attrs, c.EnduserIDKey.String(userID))
+ }
+
+ if clientIP != "" {
+ attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+ }
+
+ return attrs
+}
+
+// ServerRequestMetrics returns metric attributes for an HTTP request received
+// by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and, if a port is used to route to the
+// server, that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port".
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+ // TODO: This currently does not add the specification required
+ // `http.target` attribute. It has too high of a cardinality to safely be
+ // added. An alternate should be added, or this comment removed, when it is
+ // addressed by the specification. If it is ultimately decided to continue
+ // not including the attribute, the HTTPTargetKey field of the httpConv
+ // should be removed as well.
+
+ n := 4 // Method, scheme, proto, and host name.
+ var host string
+ var p int
+ if server == "" {
+ host, p = splitHostPort(req.Host)
+ } else {
+ // Prioritize the primary server name.
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.methodMetric(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.flavor(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + return attrs +} + +func (c *httpConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func (c *httpConv) flavor(proto string) attribute.KeyValue { + switch proto { + case "HTTP/1.0": + return c.HTTPFlavorKey.String("1.0") + case "HTTP/1.1": + return c.HTTPFlavorKey.String("1.1") + case "HTTP/2": + return c.HTTPFlavorKey.String("2.0") + case "HTTP/3": + return c.HTTPFlavorKey.String("3.0") + default: + return c.HTTPFlavorKey.String(proto) + } +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// RequestHeader returns the contents of h as OpenTelemetry attributes. +func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue { + return c.header("http.request.header", h) +} + +// ResponseHeader returns the contents of h as OpenTelemetry attributes. +func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue { + return c.header("http.response.header", h) +} + +func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue { + key := func(k string) attribute.Key { + k = strings.ToLower(k) + k = strings.ReplaceAll(k, "-", "_") + k = fmt.Sprintf("%s.%s", prefix, k) + return attribute.Key(k) + } + + attrs := make([]attribute.KeyValue, 0, len(h)) + for k, v := range h { + attrs = append(attrs, key(k).StringSlice(v)) + } + return attrs +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *httpConv) ClientStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. 
Status codes in the 400-499 range are not
+// returned as errors.
+func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
+ if code < 100 || code >= 600 {
+ return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ if code >= 500 {
+ return codes.Error, ""
+ }
+ return codes.Unset, ""
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
new file mode 100644
index 000000000..bde889343
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -0,0 +1,368 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/netconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+ "net"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+// NetTransport returns a trace attribute describing the transport protocol of the
+// passed network. See net.Dial for information about acceptable network
+// values.
+func NetTransport(network string) attribute.KeyValue {
+ return nc.Transport(network)
+}
+
+// NetClient returns trace attributes for a client network connection to address.
+// See net.Dial for information about acceptable address values; address should
+// be the same as the one used to create conn. If conn is nil, only network
+// peer attributes will be returned that describe address. Otherwise, the
+// socket level information about conn will also be included.
+func NetClient(address string, conn net.Conn) []attribute.KeyValue {
+ return nc.Client(address, conn)
+}
+
+// NetServer returns trace attributes for a network listener listening at address.
+// See net.Listen for information about acceptable address values; address
+// should be the same as the one used to create ln. If ln is nil, only network
+// host attributes will be returned that describe address. Otherwise, the
+// socket level information about ln will also be included.
+func NetServer(address string, ln net.Listener) []attribute.KeyValue {
+ return nc.Server(address, ln)
+}
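NetClient and NetServer pair the logical address with socket-level details taken from the live connection or listener. A sketch of how instrumentation inside this package's import tree might use NetClient after dialing; the helper function is hypothetical:

```go
func dialWithAttrs(network, address string) (net.Conn, []attribute.KeyValue, error) {
	conn, err := net.Dial(network, address)
	if err != nil {
		return nil, nil, err
	}
	// Pass the same address that was dialed so the peer attributes and the
	// socket-level attributes describe the same endpoint.
	return conn, semconvutil.NetClient(address, conn), nil
}
```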
+
+// netConv are the network semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type netConv struct {
+ NetHostNameKey attribute.Key
+ NetHostPortKey attribute.Key
+ NetPeerNameKey attribute.Key
+ NetPeerPortKey attribute.Key
+ NetSockFamilyKey attribute.Key
+ NetSockPeerAddrKey attribute.Key
+ NetSockPeerPortKey attribute.Key
+ NetSockHostAddrKey attribute.Key
+ NetSockHostPortKey attribute.Key
+ NetTransportOther attribute.KeyValue
+ NetTransportTCP attribute.KeyValue
+ NetTransportUDP attribute.KeyValue
+ NetTransportInProc attribute.KeyValue
+}
+
+var nc = &netConv{
+ NetHostNameKey: semconv.NetHostNameKey,
+ NetHostPortKey: semconv.NetHostPortKey,
+ NetPeerNameKey: semconv.NetPeerNameKey,
+ NetPeerPortKey: semconv.NetPeerPortKey,
+ NetSockFamilyKey: semconv.NetSockFamilyKey,
+ NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
+ NetSockPeerPortKey: semconv.NetSockPeerPortKey,
+ NetSockHostAddrKey: semconv.NetSockHostAddrKey,
+ NetSockHostPortKey: semconv.NetSockHostPortKey,
+ NetTransportOther: semconv.NetTransportOther,
+ NetTransportTCP: semconv.NetTransportTCP,
+ NetTransportUDP: semconv.NetTransportUDP,
+ NetTransportInProc: semconv.NetTransportInProc,
+}
+
+func (c *netConv) Transport(network string) attribute.KeyValue {
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ return c.NetTransportTCP
+ case "udp", "udp4", "udp6":
+ return c.NetTransportUDP
+ case "unix", "unixgram", "unixpacket":
+ return c.NetTransportInProc
+ default:
+ // "ip:*", "ip4:*", and "ip6:*" all are considered other.
+ return c.NetTransportOther
+ }
+}
+
+// Host returns attributes for a network host address.
+func (c *netConv) Host(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.HostName(h))
+ if p > 0 {
+ attrs = append(attrs, c.HostPort(p))
+ }
+ return attrs
+}
+
+// Server returns attributes for a network listener listening at address. See
+// net.Listen for information about acceptable address values; address should
+// be the same as the one used to create ln. If ln is nil, only network host
+// attributes will be returned that describe address. Otherwise, the socket
+// level information about ln will also be included.
+func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue {
+ if ln == nil {
+ return c.Host(address)
+ }
+
+ lAddr := ln.Addr()
+ if lAddr == nil {
+ return c.Host(address)
+ }
+
+ hostName, hostPort := splitHostPort(address)
+ sockHostAddr, sockHostPort := splitHostPort(lAddr.String())
+ network := lAddr.Network()
+ sockFamily := family(network, sockHostAddr)
+
+ n := nonZeroStr(hostName, network, sockHostAddr, sockFamily)
+ n += positiveInt(hostPort, sockHostPort)
+ attr := make([]attribute.KeyValue, 0, n)
+ if hostName != "" {
+ attr = append(attr, c.HostName(hostName))
+ if hostPort > 0 {
+ // Only if net.host.name is set should net.host.port be.
+ attr = append(attr, c.HostPort(hostPort))
+ }
+ }
+ if network != "" {
+ attr = append(attr, c.Transport(network))
+ }
+ if sockFamily != "" {
+ attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
+ }
+ if sockHostAddr != "" {
+ attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
+ if sockHostPort > 0 {
+ // Only if net.sock.host.addr is set should net.sock.host.port be.
+ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func (c *netConv) HostName(name string) attribute.KeyValue { + return c.NetHostNameKey.String(name) +} + +func (c *netConv) HostPort(port int) attribute.KeyValue { + return c.NetHostPortKey.Int(port) +} + +// Client returns attributes for a client network connection to address. See +// net.Dial for information about acceptable address values, address should be +// the same as the one used to create conn. If conn is nil, only network peer +// attributes will be returned that describe address. Otherwise, the socket +// level information about conn will also be included. +func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue { + if conn == nil { + return c.Peer(address) + } + + lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() + + var network string + switch { + case lAddr != nil: + network = lAddr.Network() + case rAddr != nil: + network = rAddr.Network() + default: + return c.Peer(address) + } + + peerName, peerPort := splitHostPort(address) + var ( + sockFamily string + sockPeerAddr string + sockPeerPort int + sockHostAddr string + sockHostPort int + ) + + if lAddr != nil { + sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) + } + + if rAddr != nil { + sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) + } + + switch { + case sockHostAddr != "": + sockFamily = family(network, sockHostAddr) + case sockPeerAddr != "": + sockFamily = family(network, sockPeerAddr) + } + + n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) + n += positiveInt(peerPort, sockPeerPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if peerName != "" { + attr = append(attr, c.PeerName(peerName)) + if peerPort > 0 { + // Only if net.peer.name is set should net.peer.port be. + attr = append(attr, c.PeerPort(peerPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockPeerAddr != "" { + attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) + if sockPeerPort > 0 { + // Only if net.sock.peer.addr is set should net.sock.peer.port be. + attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) + } + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. + attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func family(network, address string) string { + switch network { + case "unix", "unixgram", "unixpacket": + return "unix" + default: + if ip := net.ParseIP(address); ip != nil { + if ip.To4() == nil { + return "inet6" + } + return "inet" + } + } + return "" +} + +func nonZeroStr(strs ...string) int { + var n int + for _, str := range strs { + if str != "" { + n++ + } + } + return n +} + +func positiveInt(ints ...int) int { + var n int + for _, i := range ints { + if i > 0 { + n++ + } + } + return n +} + +// Peer returns attributes for a network peer address. 
+func (c *netConv) Peer(address string) []attribute.KeyValue {
+ h, p := splitHostPort(address)
+ var n int
+ if h != "" {
+ n++
+ if p > 0 {
+ n++
+ }
+ }
+
+ if n == 0 {
+ return nil
+ }
+
+ attrs := make([]attribute.KeyValue, 0, n)
+ attrs = append(attrs, c.PeerName(h))
+ if p > 0 {
+ attrs = append(attrs, c.PeerPort(p))
+ }
+ return attrs
+}
+
+func (c *netConv) PeerName(name string) attribute.KeyValue {
+ return c.NetPeerNameKey.String(name)
+}
+
+func (c *netConv) PeerPort(port int) attribute.KeyValue {
+ return c.NetPeerPortKey.Int(port)
+}
+
+func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
+ return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
+ return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+ port = -1
+
+ if strings.HasPrefix(hostport, "[") {
+ addrEnd := strings.LastIndex(hostport, "]")
+ if addrEnd < 0 {
+ // Invalid hostport.
+ return
+ }
+ if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+ host = hostport[1:addrEnd]
+ return
+ }
+ } else {
+ if i := strings.LastIndex(hostport, ":"); i < 0 {
+ host = hostport
+ return
+ }
+ }
+
+ host, pStr, err := net.SplitHostPort(hostport)
+ if err != nil {
+ return
+ }
+
+ p, err := strconv.ParseUint(pStr, 10, 16)
+ if err != nil {
+ return
+ }
+ return host, int(p)
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
new file mode 100644
index 000000000..26a51a180
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
+// the metrics recorded by the net/http instrumentation.
+type Labeler struct {
+ mu sync.Mutex
+ attributes []attribute.KeyValue
+}
+
+// Add attributes to a Labeler.
+func (l *Labeler) Add(ls ...attribute.KeyValue) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.attributes = append(l.attributes, ls...)
+}
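The Labeler is the public hook for attaching request-scoped metric attributes from application code; LabelerFromContext (defined just below) retrieves the instance that the otelhttp handler injected. A minimal sketch with a hypothetical attribute key:

```go
func handle(w http.ResponseWriter, r *http.Request) {
	// Inside a handler wrapped by otelhttp.NewHandler, this returns the
	// Labeler injected for the current request.
	labeler, _ := otelhttp.LabelerFromContext(r.Context())
	labeler.Add(attribute.String("example.tenant", "acme"))
	w.WriteHeader(http.StatusOK)
}
```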
+
+// Get returns a copy of the attributes added to the Labeler.
+func (l *Labeler) Get() []attribute.KeyValue {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ ret := make([]attribute.KeyValue, len(l.attributes))
+ copy(ret, l.attributes)
+ return ret
+}
+
+type labelerContextKeyType int
+
+const labelerContextKey labelerContextKeyType = 0
+
+func injectLabeler(ctx context.Context, l *Labeler) context.Context {
+ return context.WithValue(ctx, labelerContextKey, l)
+}
+
+// LabelerFromContext retrieves a Labeler instance from the provided context if
+// one is available. If no Labeler was found in the provided context a new, empty
+// Labeler is returned and the second return value is false. In this case it is
+// safe to use the Labeler but any attributes added to it will not be used.
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
+ l, ok := ctx.Value(labelerContextKey).(*Labeler)
+ if !ok {
+ l = &Labeler{}
+ }
+ return l, ok
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
new file mode 100644
index 000000000..e835cac12
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -0,0 +1,193 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "net/http/httptrace"
+
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Transport implements the http.RoundTripper interface and wraps
+// outbound HTTP(S) requests with a span.
+type Transport struct {
+ rt http.RoundTripper
+
+ tracer trace.Tracer
+ propagators propagation.TextMapPropagator
+ spanStartOptions []trace.SpanStartOption
+ filters []Filter
+ spanNameFormatter func(string, *http.Request) string
+ clientTrace func(context.Context) *httptrace.ClientTrace
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewTransport wraps the provided http.RoundTripper with one that
+// starts a span and injects the span context into the outbound request headers.
+//
+// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
+// as the base http.RoundTripper.
+func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
+ if base == nil {
+ base = http.DefaultTransport
+ }
+
+ t := Transport{
+ rt: base,
+ }
+
+ defaultOpts := []Option{
+ WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
+ WithSpanNameFormatter(defaultTransportFormatter),
+ }
+
+ c := newConfig(append(defaultOpts, opts...)...)
+ t.applyConfig(c)
+
+ return &t
+}
+
+func (t *Transport) applyConfig(c *config) {
+ t.tracer = c.Tracer
+ t.propagators = c.Propagators
+ t.spanStartOptions = c.SpanStartOptions
+ t.filters = c.Filters
+ t.spanNameFormatter = c.SpanNameFormatter
+ t.clientTrace = c.ClientTrace
+}
+
+func defaultTransportFormatter(_ string, r *http.Request) string {
+ return "HTTP " + r.Method
+}
+
+// RoundTrip creates a Span and propagates its context via the provided request's headers
+// before handing the request to the configured base RoundTripper. The created span will
+// end when the response body is closed or when a read from the body returns io.EOF.
+func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
+ for _, f := range t.filters {
+ if !f(r) {
+ // Simply pass through to the base RoundTripper if a filter rejects the request.
+ return t.rt.RoundTrip(r)
+ }
+ }
+
+ tracer := t.tracer
+
+ if tracer == nil {
+ if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+ tracer = newTracer(span.TracerProvider())
+ } else {
+ tracer = newTracer(otel.GetTracerProvider())
+ }
+ }
+
+ opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
+
+ ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
+
+ if t.clientTrace != nil {
+ ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
+ }
+
+ r = r.Clone(ctx) // According to the RoundTripper spec, we shouldn't modify the original request.
+ span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+ t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
+
+ res, err := t.rt.RoundTrip(r)
+ if err != nil {
+ span.RecordError(err)
+ span.SetStatus(codes.Error, err.Error())
+ span.End()
+ return res, err
+ }
+
+ span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
+ span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+ res.Body = newWrappedBody(span, res.Body)
+
+ return res, err
+}
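Wiring the Transport into a client is the usual entry point for this file. A minimal sketch (the URL is illustrative); note that, per the RoundTrip documentation above, the span only ends once the response body is closed or fully read:

```go
func fetch() error {
	client := &http.Client{
		Transport: otelhttp.NewTransport(http.DefaultTransport),
	}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		return err // the span has already been ended with an error status
	}
	defer resp.Body.Close() // closing the body ends the client span

	_, err = io.Copy(io.Discard, resp.Body)
	return err
}
```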
+
+// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
+// io.ReadCloser. If the passed body implements io.Writer, the returned value
+// will implement io.ReadWriteCloser.
+func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser {
+ // The successful protocol switch responses will have a body that
+ // implements an io.ReadWriteCloser. Ensure this interface type continues
+ // to be satisfied if that is the case.
+ if _, ok := body.(io.ReadWriteCloser); ok {
+ return &wrappedBody{span: span, body: body}
+ }
+
+ // Remove the implementation of the io.ReadWriteCloser and only implement
+ // the io.ReadCloser.
+ return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}}
+}
+
+// wrappedBody is the response body type returned by the transport
+// instrumentation to complete a span. Errors encountered when using the
+// response body are recorded in the span tracking the response.
+//
+// The span tracking the response is ended when this body is closed.
+//
+// If the response body implements the io.Writer interface (i.e. for
+// successful protocol switches), the wrapped body also will.
+type wrappedBody struct {
+ span trace.Span
+ body io.ReadCloser
+}
+
+var _ io.ReadWriteCloser = &wrappedBody{}
+
+func (wb *wrappedBody) Write(p []byte) (int, error) {
+ // This will not panic given the guard in newWrappedBody.
+ n, err := wb.body.(io.Writer).Write(p)
+ if err != nil {
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+func (wb *wrappedBody) Read(b []byte) (int, error) {
+ n, err := wb.body.Read(b)
+
+ switch err {
+ case nil:
+ // nothing to do here but fall through to the return
+ case io.EOF:
+ wb.span.End()
+ default:
+ wb.span.RecordError(err)
+ wb.span.SetStatus(codes.Error, err.Error())
+ }
+ return n, err
+}
+
+func (wb *wrappedBody) Close() error {
+ wb.span.End()
+ if wb.body != nil {
+ return wb.body.Close()
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
new file mode 100644
index 000000000..6eace875c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// Version is the current release version of the otelhttp instrumentation.
+func Version() string {
+ return "0.45.0"
+ // This string is updated by the pre_release.sh script during release.
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
new file mode 100644
index 000000000..11a35ed16
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
@@ -0,0 +1,99 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "go.opentelemetry.io/otel/propagation"
+)
+
+var _ io.ReadCloser = &bodyWrapper{}
+
+// bodyWrapper wraps an http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type bodyWrapper struct {
+ io.ReadCloser
+ record func(n int64) // must not be nil
+
+ read int64
+ err error
+}
+
+func (w *bodyWrapper) Read(b []byte) (int, error) {
+ n, err := w.ReadCloser.Read(b)
+ n1 := int64(n)
+ w.read += n1
+ w.err = err
+ w.record(n1)
+ return n, err
+}
+
+func (w *bodyWrapper) Close() error {
+ return w.ReadCloser.Close()
+}
+
+var _ http.ResponseWriter = &respWriterWrapper{}
+
+// respWriterWrapper wraps an http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
+// that may be useful when using it in real life situations.
+type respWriterWrapper struct {
+ http.ResponseWriter
+ record func(n int64) // must not be nil
+
+ // used to inject the header
+ ctx context.Context
+
+ props propagation.TextMapPropagator
+
+ written int64
+ statusCode int
+ err error
+ wroteHeader bool
+}
+
+func (w *respWriterWrapper) Header() http.Header {
+ return w.ResponseWriter.Header()
+}
+
+func (w *respWriterWrapper) Write(p []byte) (int, error) {
+ if !w.wroteHeader {
+ w.WriteHeader(http.StatusOK)
+ }
+ n, err := w.ResponseWriter.Write(p)
+ n1 := int64(n)
+ w.record(n1)
+ w.written += n1
+ w.err = err
+ return n, err
+}
+
+// WriteHeader persists the initial statusCode for span attribution.
+// All calls to WriteHeader will be propagated to the underlying ResponseWriter
+// and will persist the statusCode from the first call.
+// Blocking subsequent calls to WriteHeader would alter the expected behavior and
+// would suppress the net/http warning logs that let developers notice incorrect
+// handler implementations.
+func (w *respWriterWrapper) WriteHeader(statusCode int) {
+ if !w.wroteHeader {
+ w.wroteHeader = true
+ w.statusCode = statusCode
+ }
+ w.ResponseWriter.WriteHeader(statusCode)
+}
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
new file mode 100644
index 000000000..ae6a3bcf1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -0,0 +1,5 @@
+ot
+fo
+te
+collison
+consequentially
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
new file mode 100644
index 000000000..4afbb1fb3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellrc
@@ -0,0 +1,10 @@
+# https://github.com/codespell-project/codespell
+[codespell]
+builtin = clear,rare,informal
+check-filenames =
+check-hidden =
+ignore-words = .codespellignore
+interactive = 1
+skip = .git,go.mod,go.sum,semconv,venv,.tools
+uri-ignore-words-list = *
+write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
new file mode 100644
index 000000000..314766e91
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitattributes
@@ -0,0 +1,3 @@
+* text=auto eol=lf
+*.{cmd,[cC][mM][dD]} text eol=crlf
+*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
new file mode 100644
index 000000000..895c7664b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -0,0 +1,22 @@
+.DS_Store
+Thumbs.db
+
+.tools/
+venv/
+.idea/
+.vscode/
+*.iml
+*.so
+coverage.*
+go.work
+go.work.sum
+
+gen/
+
+/example/dice/dice
+/example/namedtracer/namedtracer
+/example/otel-collector/otel-collector
+/example/opencensus/opencensus
+/example/passthrough/passthrough
+/example/prometheus/prometheus
+/example/zipkin/zipkin
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
new file mode 100644
index 000000000..38a1f5698
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "opentelemetry-proto"]
+ path = exporters/otlp/internal/opentelemetry-proto
+ url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
new file mode 100644
index 000000000..a62511f38
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -0,0 +1,296 @@
+# See https://github.com/golangci/golangci-lint#config-file
+run:
+ issues-exit-code: 1 #Default
+ tests: true #Default
+
+linters:
+ # Disable everything by default so upgrades do not include new "default
+ # enabled" linters.
+ disable-all: true
+ # Specifically enable linters we want to use.
+ enable:
+ - depguard
+ - errcheck
+ - godot
+ - gofumpt
+ - goimports
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - revive
+ - staticcheck
+ - typecheck
+ - unused
+
+issues:
+ # Maximum issues count per one linter.
+ # Set to 0 to disable.
+ # Default: 50
+ # Setting to unlimited so the linter is run only once to debug all issues.
+ max-issues-per-linter: 0
+ # Maximum count of issues with the same text.
+ # Set to 0 to disable.
+ # Default: 3
+ # Setting to unlimited so the linter is run only once to debug all issues.
+ max-same-issues: 0
+ # Excluding configuration per-path, per-linter, per-text and per-source.
+ exclude-rules:
+ # TODO: Having appropriate comments for exported objects helps development,
+ # even for objects in internal packages. Appropriate comments for all
+ # exported objects should be added and this exclusion removed.
+ - path: '.*internal/.*'
+ text: "exported (method|function|type|const) (.+) should have comment or be unexported"
+ linters:
+ - revive
+ # Yes, they are, but it's okay in a test.
+ - path: _test\.go
+ text: "exported func.*returns unexported type.*which can be annoying to use"
+ linters:
+ - revive
+ # Example test functions should be treated like main.
+ - path: example.*_test\.go
+ text: "calls to (.+) only in main[(][)] or init[(][)] functions"
+ linters:
+ - revive
+ # It's okay to not run gosec in a test.
+ - path: _test\.go
+ linters:
+ - gosec
+ # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+ # as we commonly use it in tests and examples.
+ - text: "G404:"
+ linters:
+ - gosec
+ # Ignoring gosec G402: TLS MinVersion too low
+ # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
+ - text: "G402: TLS MinVersion too low."
+ linters:
+ - gosec
+ include:
+ # revive exported should have comment or be unexported.
+ - EXC0012
+ # revive package comment should be of the form ...
+ - EXC0013
+
+linters-settings:
+ depguard:
+ rules:
+ non-tests:
+ files:
+ - "!$test"
+ - "!**/*test/*.go"
+ - "!**/internal/matchers/*.go"
+ deny:
+ - pkg: "testing"
+ - pkg: "github.com/stretchr/testify"
+ - pkg: "crypto/md5"
+ - pkg: "crypto/sha1"
+ - pkg: "crypto/**/pkix"
+ otlp-internal:
+ files:
+ - "!**/exporters/otlp/internal/**/*.go"
+ deny:
+ - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
+ desc: Do not use cross-module internal packages.
+ otlptrace-internal: + files: + - "!**/exporters/otlp/otlptrace/*.go" + - "!**/exporters/otlp/otlptrace/internal/**.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - "!**/exporters/otlp/otlpmetric/internal/*.go" + - "!**/exporters/otlp/otlpmetric/internal/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" + desc: Do not use cross-module internal packages. + otel-internal: + files: + - "**/sdk/*.go" + - "**/sdk/**/*.go" + - "**/exporters/*.go" + - "**/exporters/**/*.go" + - "**/schema/*.go" + - "**/schema/**/*.go" + - "**/metric/*.go" + - "**/metric/**/*.go" + - "**/bridge/*.go" + - "**/bridge/**/*.go" + - "**/example/*.go" + - "**/example/**/*.go" + - "**/trace/*.go" + - "**/trace/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/internal$" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/attribute" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/internaltest" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/matchers" + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 000000000..40d62fa2e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 000000000..3202496c3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 000000000..fe670d79c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,2859 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.22.0/0.45.0] 2024-01-17 + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.22.0` package. + The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. 
(#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+ The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+ The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+ The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/EXPERIMENTAL.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.19.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`.
+ If you do not want to make a measurement when the context is canceled, you need to handle it yourself (e.g. `if ctx.Err() != nil`; see the sketch after this section). (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
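A minimal sketch of the guard suggested by the canceled-context change above; `counter` is assumed to be a previously created synchronous instrument:

```go
// Skip recording when the caller's context has already been canceled;
// otherwise the SDK now records the measurement instead of dropping it.
if ctx.Err() == nil {
	counter.Add(ctx, 1)
}
```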
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
+## [1.20.0/0.43.0] 2023-11-10
+
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this; a minimal sketch also follows these changelog notes.
+
+### Added
+
+- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
+- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
+- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
+- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
+- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
+- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+ Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+ This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+ Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+  Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate the `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the `metric.Producer` interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type (see the sketch after this list).
+  This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+  This extends the `Tracer` interface and is a breaking change for any existing implementation.
+  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
+  This extends the `Span` interface and is a breaking change for any existing implementation.
+  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
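+
+A custom trace API implementation under the embedding scheme described above could be sketched as follows (illustrative only; the type name is made up, and the no-op fallback is one of several valid default behaviors):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+// partialTracerProvider embeds embedded.TracerProvider so it keeps compiling
+// as the trace API grows; methods it does not override fall back to a no-op.
+type partialTracerProvider struct {
+	embedded.TracerProvider
+}
+
+func (p *partialTracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	return noop.NewTracerProvider().Tracer(name, opts...)
+}
+
+var _ trace.TracerProvider = (*partialTracerProvider)(nil)
+
+func main() {
+	_, span := (&partialTracerProvider{}).Tracer("example").Start(context.Background(), "op")
+	defer span.End()
+}
+```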
+
+### Fixed
+
+- Fix improper parsing of characters such as `+` and `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` as they were rendered as whitespace. (#4699)
+- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
+
+## [1.19.0/0.42.0/0.0.7] 2023-09-28
+
+This release contains the first stable release of the OpenTelemetry Go [metric SDK].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer` and allow displaying the output in human-readable JSON; see the sketch after this section. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
+- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+### Removed
+
+- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
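+
+A configured stdout metric exporter using the options above might look like this sketch (illustrative; the one-minute interval is arbitrary):
+
+```go
+package main
+
+import (
+	"context"
+	"os"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// WithWriter directs output; WithPrettyPrint re-enables indented JSON,
+	// which is no longer the default as of v1.19.0.
+	exp, err := stdoutmetric.New(
+		stdoutmetric.WithWriter(os.Stderr),
+		stdoutmetric.WithPrettyPrint(),
+	)
+	if err != nil {
+		panic(err)
+	}
+	mp := sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp, sdkmetric.WithInterval(time.Minute))),
+	)
+	defer func() { _ = mp.Shutdown(context.Background()) }()
+}
+```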
+
+## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
+
+This is a release candidate for the v1.19.0/v0.42.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+## [1.18.0/0.41.0/0.0.6] 2023-09-12
+
+This release drops the compatibility guarantee of [Go 1.19].
+
+### Added
+
+- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader. (#4473)
+- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
+
+### Changed
+
+- Use a `TestingT` interface instead of the `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
+  The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
+
+### Removed
+
+- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
+- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
+- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Add support for exponential histogram aggregations.
+  A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation (see the sketch after this list). (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
+- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
+  The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
+- Accept 201 to 299 HTTP status codes as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
+- Support Go 1.21. (#4463)
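+
+A view selecting the exponential histogram aggregation could be sketched like this (a minimal example against the post-#4435 `sdk/metric` API, where the concrete aggregation type is `AggregationBase2ExponentialHistogram`; the instrument name and sizing are arbitrary):
+
+```go
+package main
+
+import (
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// Route the named histogram instrument to an exponential histogram.
+	view := sdkmetric.NewView(
+		sdkmetric.Instrument{Name: "request.duration"},
+		sdkmetric.Stream{
+			Aggregation: sdkmetric.AggregationBase2ExponentialHistogram{
+				MaxSize:  160,
+				MaxScale: 20,
+			},
+		},
+	)
+	mp := sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
+	_ = mp
+}
+```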
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflicts at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments they created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user-provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase the instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/sdk/metric`.
+  Use the added `WithProducer` option instead (see the sketch after this list). (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric`.
+  Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
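+
+Bridging external metrics with the `WithProducer` option could be sketched as below (illustrative; `myProducer` is a made-up stand-in for a real bridge such as the OpenCensus one):
+
+```go
+package main
+
+import (
+	"context"
+
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// myProducer implements sdkmetric.Producer, feeding externally produced
+// metrics into every collection of the reader it is registered with.
+type myProducer struct{}
+
+func (myProducer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
+	return nil, nil // external metrics would be returned here
+}
+
+func main() {
+	// Producers are now registered at reader construction, replacing the
+	// removed Reader.RegisterProducer.
+	reader := sdkmetric.NewManualReader(sdkmetric.WithProducer(myProducer{}))
+	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
+	_ = mp
+}
+```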
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock and race condition in the batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in the batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix a resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+  OpenTelemetry dropped support for the Jaeger exporter in July 2023.
+  Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
+  or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead (see the sketch after this list). (#4423)
+- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
+  Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
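+
+Migrating off the deprecated Jaeger exporter to OTLP, as recommended above, could be sketched like this (illustrative; the endpoint is an assumed local collector listening on the default OTLP/HTTP port):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	ctx := context.Background()
+	// OTLP over HTTP replaces the removed Jaeger exporter; Jaeger itself
+	// accepts OTLP natively in recent versions.
+	exp, err := otlptracehttp.New(ctx,
+		otlptracehttp.WithEndpoint("localhost:4318"),
+		otlptracehttp.WithInsecure(),
+	)
+	if err != nil {
+		panic(err)
+	}
+	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+	defer func() { _ = tp.Shutdown(ctx) }()
+}
+```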
+
+## [1.16.0/0.39.0] 2023-05-18
+
+This release contains the first stable release of the OpenTelemetry Go [metric API].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+A minimal usage sketch follows this section.
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
+  The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
+- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
+  The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
+- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
+- The OTLP metrics exporter now supports the Exponential Histogram data type. (#4222)
+- Fix serialization of `time.Time` zero values in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
+
+### Changed
+
+- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049)
+- `MeterProvider` returns noop meters once it has been shut down. (#4154)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
+  Use `go.opentelemetry.io/otel/metric` instead. (#4055)
+
+### Fixed
+
+- Fix the build for BSD-based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
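+
+With the metric API now stable, basic instrumentation looks like the following sketch (illustrative; the meter, instrument, and attribute names are arbitrary):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+)
+
+func main() {
+	// otel.Meter uses the globally registered MeterProvider.
+	meter := otel.Meter("example")
+	counter, err := meter.Int64Counter(
+		"requests.handled",
+		metric.WithDescription("Number of requests handled."),
+	)
+	if err != nil {
+		panic(err)
+	}
+	counter.Add(context.Background(), 1,
+		metric.WithAttributes(attribute.String("route", "/")),
+	)
+}
+```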
+
+## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
+
+This is a release candidate for the v1.16.0/v0.39.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Support the global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for the global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
+  This stages the metric API to be released as a stable module. (#4038)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/metric/global` package is removed.
+  Use `go.opentelemetry.io/otel` instead. (#4039)
+
+## [1.15.1/0.38.1] 2023-05-02
+
+### Fixed
+
+- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
+
+## [1.15.0/0.38.0] 2023-04-27
+
+### Added
+
+- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
+- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
+- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
+- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
+  - The `AddConfig` used to hold configuration for addition measurements
+    - `NewAddConfig` used to create a new `AddConfig`
+    - `AddOption` used to configure an `AddConfig`
+  - The `RecordConfig` used to hold configuration for recorded measurements
+    - `NewRecordConfig` used to create a new `RecordConfig`
+    - `RecordOption` used to configure a `RecordConfig`
+  - The `ObserveConfig` used to hold configuration for observed measurements
+    - `NewObserveConfig` used to create a new `ObserveConfig`
+    - `ObserveOption` used to configure an `ObserveConfig`
+- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
+  They return an option used during a measurement that defines the attribute Set associated with the measurement; see the sketch after this section. (#3971)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
+
+### Changed
+
+- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
+- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
+  This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
+- Move the no-op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
+  - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
+- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
+- Wrap the `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to improve the error message when encountering generic gRPC errors. (#3974)
+- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+  - The `Int64Counter.Add` method now accepts `...AddOption`
+  - The `Float64Counter.Add` method now accepts `...AddOption`
+  - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Int64Histogram.Record` method now accepts `...RecordOption`
+  - The `Float64Histogram.Record` method now accepts `...RecordOption`
+  - The `Int64Observer.Observe` method now accepts `...ObserveOption`
+  - The `Float64Observer.Observe` method now accepts `...ObserveOption`
+- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+  - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
+  - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
+- Move the global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
+
+### Fixed
+
+- `TracerProvider` allows calling `Tracer()` while it's shutting down.
+  It used to deadlock. (#3924)
+- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
+- Fix a data race in the `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
+- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
+  Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
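+
+The `WithAttributeSet` option mentioned above pairs well with a pre-computed set when the same attributes are used on a hot path. A sketch against the current `go.opentelemetry.io/otel/metric` package, where these options now live (names are arbitrary):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+)
+
+func main() {
+	meter := otel.Meter("example")
+	counter, err := meter.Int64Counter("jobs.processed")
+	if err != nil {
+		panic(err)
+	}
+	// Build the attribute set once and reuse it for every measurement,
+	// avoiding per-call set construction.
+	set := attribute.NewSet(attribute.String("queue", "default"))
+	for i := 0; i < 3; i++ {
+		counter.Add(context.Background(), 1, metric.WithAttributeSet(set))
+	}
+}
+```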
+
+## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
+- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
+- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  Both the `DataPoint` and `HistogramDataPoint` types from that package have a new `Exemplars` field containing the sampled exemplars for their timeseries. (#3849)
+- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
+- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
+- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
+
+### Changed
+
+- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
+- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
+- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in the OpenTracing bridge. (#3833)
+- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
+- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
+- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
+- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
+- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- The internal logging changes the verbosity level of info to `V(4)` and the verbosity level of debug to `V(8)`. (#3900)
+
+### Fixed
+
+- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
+- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
+- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `float64` instrument configuration instead. (#3895)
+- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `int64` instrument configuration instead. (#3895)
+- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`; use `NewMeterProvider().Meter("")` instead. (#3893)
+
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+This release drops the compatibility guarantee of [Go 1.18].
+
+### Added
+
+- Support the global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for the global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Dropped compatibility testing for [Go 1.18].
+  The project no longer guarantees support for this version of Go. (#3813)
+
+### Fixed
+
+- Handle empty environment variables as if they were not set. (#3764)
+- Clarify that the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
+- Fix sending nil `scopeInfo` to the metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
+  Use `go.opentelemetry.io/otel` instead. (#3818)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+  The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+  - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeNameKey` -> `OTelScopeNameKey`
+    - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+    - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+    - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+    - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+    - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+    - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+    - `OtelStatusCodeError` -> `OTelStatusCodeError`
+  - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeName` -> `OTelScopeName`
+    - `OtelScopeVersion` -> `OTelScopeVersion`
+    - `OtelLibraryName` -> `OTelLibraryName`
+    - `OtelLibraryVersion` -> `OTelLibraryVersion`
+    - `OtelStatusDescription` -> `OTelStatusDescription`
+- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+  See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric` (see the sketch after this list). (#3763)
+  - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+  - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout for each export attempt.
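+
+A sketch of the equivalent programmatic configuration (illustrative; the environment variables above are read by the SDK automatically, while `WithInterval` and `WithTimeout` set the same values explicitly):
+
+```go
+package main
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	exp, err := stdoutmetric.New()
+	if err != nil {
+		panic(err)
+	}
+	// Equivalent to OTEL_METRIC_EXPORT_INTERVAL=30000 and
+	// OTEL_METRIC_EXPORT_TIMEOUT=15000 (values in milliseconds).
+	reader := sdkmetric.NewPeriodicReader(exp,
+		sdkmetric.WithInterval(30*time.Second),
+		sdkmetric.WithTimeout(15*time.Second),
+	)
+	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
+	defer func() { _ = mp.Shutdown(context.Background()) }()
+}
+```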
+
+### Changed
+
+- Fall back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+  This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove use of the deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in the OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+  Use the equivalent unit string instead. (#3776)
+  - Use `"1"` instead of `unit.Dimensionless`
+  - Use `"By"` instead of `unit.Bytes`
+  - Use `"ms"` instead of `unit.Milliseconds`
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+  These functions ensure semantic convention type correctness, as shown below. (#3675)
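+
+For example, the generated creation functions return typed `attribute.KeyValue` values directly (a minimal sketch; the chosen conventions are arbitrary examples of the generated functions):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+func main() {
+	// Each function enforces the attribute key and the value type of its
+	// semantic convention.
+	attrs := []attribute.KeyValue{
+		semconv.ServiceName("checkout"),
+		semconv.HTTPMethod("GET"),
+		semconv.HTTPStatusCode(200),
+	}
+	for _, kv := range attrs {
+		fmt.Println(kv.Key, kv.Value.Emit())
+	}
+}
+```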
+
+### Fixed
+
+- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687)
+  - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
+
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+  This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+  The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+  - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+  - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+  The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+  The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+  The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+  These instruments are used as replacements for the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+  - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+  - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+  - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+  - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+  - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+  - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+  - `Float64Counter` replaces the `syncfloat64.Counter`
+  - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+  - `Float64Histogram` replaces the `syncfloat64.Histogram`
+  - `Int64Counter` replaces the `syncint64.Counter`
+  - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+  - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+  This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+  The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
+
+### Changed
+
+- The Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+  - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+  - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+  - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+  - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+  This `Registration` can be used to unregister callbacks. (#3522)
+- The global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- The global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` in `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+  This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+  Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+  - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+  - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+  - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+  - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+  - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+  - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed (see the sketch after this list).
+  - The named `Callback` replaces the inline function parameter. (#3564)
+  - `Callback` is required to return an error. (#3576)
+  - `Callback` accepts the added `Observer` parameter.
+    This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+  - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+  This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+  Instead it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
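+
+Under the changed `RegisterCallback` shape, an asynchronous observation might be sketched as follows against the current stable API (illustrative; instrument and meter names are arbitrary):
+
+```go
+package main
+
+import (
+	"context"
+	"runtime"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric"
+)
+
+func main() {
+	meter := otel.Meter("example")
+	gauge, err := meter.Int64ObservableGauge("process.goroutines")
+	if err != nil {
+		panic(err)
+	}
+	// The callback receives an Observer and reports through it instead of
+	// calling Observe on the instrument; it must return an error.
+	reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
+		o.ObserveInt64(gauge, int64(runtime.NumGoroutine()))
+		return nil
+	}, gauge)
+	if err != nil {
+		panic(err)
+	}
+	defer func() { _ = reg.Unregister() }()
+}
+```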
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+  Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+  Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+  Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Float64Histogram`
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+  This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
+- Add Instrumentation Scope and Version as an info metric and label in the Prometheus exporter.
+  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+  - `OTEL_EXPORTER_OTLP_INSECURE`
+  - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+  - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+  - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Instrument` and `InstrumentKind` types are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+  Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+  The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove the comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevent panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` functions directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+  Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+  By default, it will register with the default Prometheus registerer.
+  A non-default registerer can be used by passing the `WithRegisterer` option (see the sketch after this list). (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
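+
+Wiring the Prometheus exporter to a non-default registerer, as described above, might be sketched like this (illustrative; the registry and HTTP address are assumptions):
+
+```go
+package main
+
+import (
+	"net/http"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+
+	otelprom "go.opentelemetry.io/otel/exporters/prometheus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	registry := prometheus.NewRegistry()
+	// WithRegisterer overrides the default Prometheus registerer.
+	exporter, err := otelprom.New(otelprom.WithRegisterer(registry))
+	if err != nil {
+		panic(err)
+	}
+	// The exporter is a metric Reader; attach it to a MeterProvider.
+	_ = sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter))
+
+	http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
+	_ = http.ListenAndServe(":2112", nil)
+}
+```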
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+  It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+  This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+  Instead the exporter is defined as an "unchecked" collector for Prometheus.
+  This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+  This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add a default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add a default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use the default view if an instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+  Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the SDK. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+  Please see the package documentation for how the new SDK is initialized and configured. (#3175)
+- Update the minimum supported Go version to go1.18. Removes support for go1.17. (#3179)
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+  Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
+- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0. (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+  Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
+- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0. (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to the HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
+
+### Fixed
+
+- Invalid warning for context setup being deferred in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for the `opentracing.TextMap` format in the `Inject` and `Extract` methods
+  of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package, rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from `nonrecording` to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+  Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+  Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+  The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+  The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+  The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove import of the `testing` package in non-test builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+  The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+  Consequently, the `Record` type from the same package also has had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+  This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+  Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+  This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+  New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+  High-level changes include:
+
+  - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+    These `InstrumentProvider`s are managed with a `Meter`.
+  - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+  - Asynchronous callbacks can now be registered with a `Meter`.
+
+  Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
+
+### Fixed
+
+- Fall back to general attribute limits when span-specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the Exporters configuration in the TracerProviders message. (#2578)
+- Added support to configure the span limits with environment variables.
+  The following environment variables are supported. (#2606, #2637)
+  - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+  - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+  - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_LINK_COUNT_LIMIT`
+  - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+  If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add span attribute value length limit.
+  The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+  The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+  This option replaces the `WithSpanLimits` option.
+  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+  Setting a limit to zero will effectively disable the related resource it limits, and setting a negative value will mean that resource is unlimited.
+  Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly (see the sketch below). (#2637)
+
+### Changed
+
+- Drop oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
+- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+  Use `WithRawSpanLimits` instead.
+  That option allows setting unlimited and zero limits, this option does not.
+  This option will be kept until the next major version incremented release. (#2637)
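+
+A minimal sketch of the raw span limits described in this release's entries (the limit values are arbitrary examples):
+
+```go
+package main
+
+import (
+    sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func newTracerProvider() *sdktrace.TracerProvider {
+    // Raw limits are used as-is: zero disables the limited resource and
+    // negative values mean unlimited, unlike the deprecated WithSpanLimits.
+    limits := sdktrace.NewSpanLimits()
+    limits.AttributeValueLengthLimit = 256 // truncate long attribute values
+    limits.EventCountLimit = -1            // unlimited events per span
+
+    return sdktrace.NewTracerProvider(
+        sdktrace.WithRawSpanLimits(limits),
+    )
+}
+```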
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s and `Tracer`s for debugging.
+  To enable, use a logger with Verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span processor with environment variables.
+  The following environment variables are used. (#2515)
+  - `OTEL_BSP_SCHEDULE_DELAY`
+  - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+
+### Changed
+
+- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522)
+- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
+  Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+  This drop order still only applies to attributes with unique keys not already contained in the span.
+  If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+  This can be used by the SDK and API to provide users with feedback of the internal state.
+  To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently (see the sketch below). (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+  Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
+  When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- Basic metric controller updated to use sync.Map to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
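+
+A hedged sketch of the retry configuration named above. It is shown here against the current `otlptracehttp` exporter path, which shares the same `WithRetry`/`RetryConfig` shape as the metric HTTP client; the durations are arbitrary:
+
+```go
+package main
+
+import (
+    "context"
+    "time"
+
+    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+)
+
+func main() {
+    ctx := context.Background()
+
+    // RetryConfig consistently describes how transient export failures
+    // are retried with exponential backoff.
+    exp, err := otlptracehttp.New(ctx,
+        otlptracehttp.WithRetry(otlptracehttp.RetryConfig{
+            Enabled:         true,
+            InitialInterval: 5 * time.Second,
+            MaxInterval:     30 * time.Second,
+            MaxElapsedTime:  time.Minute,
+        }),
+    )
+    if err != nil {
+        panic(err)
+    }
+    defer func() { _ = exp.Shutdown(ctx) }()
+}
+```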
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`.
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The no-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+  The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+  The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+  The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+  - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+    - `K8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has the `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
+  Use the typed functions and methods added to the package instead. (#2235)
+  - The `Key.Array` method is removed.
+  - The `Array` function is removed.
+  - The `Any` function is removed.
+  - The `ArrayValue` function is removed.
+  - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as a `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions (see the sketch below). (#2162)
+  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+  Included is an example application that computes Fibonacci numbers. (#2203)
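+
+A short sketch of the typed slice attribute functions added above (the key names are made up for illustration):
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "go.opentelemetry.io/otel/attribute"
+)
+
+func main() {
+    // Typed slice constructors replace the removed Array function.
+    attrs := []attribute.KeyValue{
+        attribute.StringSlice("request.tags", []string{"a", "b"}),
+        attribute.Int64Slice("retry.delays_ms", []int64{100, 200, 400}),
+        attribute.BoolSlice("feature.flags", []bool{true, false}),
+    }
+    fmt.Println(attrs)
+}
+```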
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+  - ValueRecorder becomes Histogram
+  - ValueObserver becomes Gauge
+  - SumObserver becomes CounterObserver
+  - UpDownSumObserver becomes UpDownCounterObserver
+  The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+  All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+  The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated.
+  Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+  Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when the `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in resources.go. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added API `LinkFromContext` to return a Link which encapsulates the SpanContext from the provided context and also encapsulates attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+  This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+  For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  Use the `trace.ParseTraceState` function instead. (#2122)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+  The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+  This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`,
+while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
+with major version 0.
+
+### Added
+
+- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the OTLP/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+    | gRPC Status Code | Description |
+    | ---------------- | ----------- |
+    | 1 | Cancelled |
+    | 4 | Deadline Exceeded |
+    | 8 | Resource Exhausted |
+    | 10 | Aborted |
+    | 11 | Out of Range |
+    | 14 | Unavailable |
+    | 15 | Data Loss |
+- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+  It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+  This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions (see the sketch below). (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+  These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
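+
+A brief sketch of the new `baggage` types and context functions added above (the member key and value are placeholders):
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+
+    "go.opentelemetry.io/otel/baggage"
+)
+
+func main() {
+    // Members are validated once, when they are created.
+    m, err := baggage.NewMember("user.role", "admin")
+    if err != nil {
+        panic(err)
+    }
+    bag, err := baggage.New(m)
+    if err != nil {
+        panic(err)
+    }
+
+    // ContextWithBaggage/FromContext replace the removed Set/Value helpers.
+    ctx := baggage.ContextWithBaggage(context.Background(), bag)
+    fmt.Println(baggage.FromContext(ctx).Member("user.role").Value())
+}
+```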
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+  `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when calling the `ForceFlush()` method. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+  This method returns the status of a span using the new `Status` type. (#1874)
+- Updated `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+  This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- `span.End()` now only accepts Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+  This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+  The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported the `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK` types; use the corresponding `With*()` options to enable them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
+  The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+  The `IsRecording` method returns if the span is recording or not.
+  A read-only span value does not need to know if updates to it will be recorded or not.
+  By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+  The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+  When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+  Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
+  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
+  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+  Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+ These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985) +- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990) +- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005) + +### Fixed + +- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851) +- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856) +- BatchSpanProcessor now drops span batches that failed to be exported. (#1860) +- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898) +- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931) +- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973) +- Avoid transport security when OTLP endpoint is a Unix socket. (#2001) + +### Security + +## [0.20.0] - 2021-04-23 + +### Added + +- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373) +- Adds semantic conventions for exceptions. (#1492) +- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT` + These environment variables can be used to override Jaeger agent hostname and port (#1752) +- Option `ExportTimeout` was added to batch span processor. (#1755) +- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770) +- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771) +- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771) +- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772) +- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785) +- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788) +- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788) + - `process.pid` + - `process.executable.name` + - `process.executable.path` + - `process.command_args` + - `process.owner` + - `process.runtime.name` + - `process.runtime.version` + - `process.runtime.description` +- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789) +- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. 
(#1758, #1769 and #1811)
+  - `OTEL_EXPORTER_OTLP_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_HEADERS`
+  - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+  - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+  - `OTEL_EXPORTER_OTLP_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the OTLP/gRPC exporter. (#1821)
+- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+  This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
+  Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed typo for default service name in Jaeger Exporter. (#1797)
+- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814)
+- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
+  Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
+- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+  The Span's SpanContext can now self-identify as being remote or not.
+  This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+  The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+  This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+  to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+  It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
+- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The prometheus.InstallNewPipeline example is moved from comment to example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+  This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+  The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself; instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
+
+### Removed
+
+- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`.
+  These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+  This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+  To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
+- Setting error status while recording error with Span from oteltest package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+  Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
+  If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+  This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
+  The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+  To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+  These functions for retrieving specific environment variable values are redundant of other internal functions and
+  are not intended for end user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+  This option was used to set SDK options for the exporter creation convenience functions.
+  These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases.
+  If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+  The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+  The type is no longer used by the exporter to configure anything.
+  All the previous configurations these options provided were duplicates of SDK configuration.
+  They have been removed in favor of using the SDK configuration, and this focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830)
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobufs. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
+- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702)
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+  - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known (see the sketch after this list).
+- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656)
+- Added non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
+- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
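+
+A small sketch of constructing the now-immutable `SpanContext` via `SpanContextConfig`, as described in the list above (the IDs are dummy values):
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+    // With no exported fields, a SpanContext is built from a config struct.
+    tid, _ := trace.TraceIDFromHex("0102030405060708090a0b0c0d0e0f10")
+    sid, _ := trace.SpanIDFromHex("0102030405060708")
+
+    sc := trace.NewSpanContext(trace.SpanContextConfig{
+        TraceID:    tid,
+        SpanID:     sid,
+        TraceFlags: trace.FlagsSampled,
+        Remote:     true, // extracted from remote context data
+    })
+    fmt.Println(sc.IsValid(), sc.IsRemote())
+}
+```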
+
+### Removed
+
+- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549)
+- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+  These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed `jaeger.WithProcess` configuration option. (#1673)
+- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTel and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Synchronization issues in global trace delegate implementation. (#1686)
+- Reduced excess memory usage by global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
+- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
+- Compatibility testing suite in the CI system for the following systems. (#1567)
+  | OS | Go Version | Architecture |
+  | ------- | ---------- | ------------ |
+  | Ubuntu | 1.15 | amd64 |
+  | Ubuntu | 1.14 | amd64 |
+  | Ubuntu | 1.15 | 386 |
+  | Ubuntu | 1.14 | 386 |
+  | MacOS | 1.15 | amd64 |
+  | MacOS | 1.14 | amd64 |
+  | Windows | 1.15 | amd64 |
+  | Windows | 1.14 | amd64 |
+  | Windows | 1.15 | 386 |
+  | Windows | 1.14 | 386 |
+
+### Changed
+
+- Replaced interface `oteltest.SpanRecorder` with its existing implementation
+  `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests. (#1569)
+- Prevent end-users from implementing some interfaces (#1575)
+
+  ```
+  "otel/exporters/otlp/otlphttp".Option
+  "otel/exporters/stdout".Option
+  "otel/oteltest".Option
+  "otel/trace".TracerOption
+  "otel/trace".SpanOption
+  "otel/trace".EventOption
+  "otel/trace".LifeCycleOption
+  "otel/trace".InstrumentationOption
+  "otel/sdk/resource".Option
+  "otel/sdk/trace".ParentBasedSamplerOption
+  "otel/sdk/trace".ReadOnlySpan
+  "otel/sdk/trace".ReadWriteSpan
+  ```
+
+### Removed
+
+- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+  This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572)
+- Windows build of Jaeger tests now compiles with OS-specific functions (#1576). (#1577)
+- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue is now set up explicitly to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename project default branch from `master` to `main`. (#1505)
+- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
+- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added CodeQL workflow to GitHub Actions (#1428)
+- Added Gosec workflow to GitHub Actions (#1429)
+- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
+- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
+- Remove duplicate checkout from GitHub Actions workflow (#1407)
+- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412)
+- Metric `exact` aggregator includes per-point timestamps (#1412)
+- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify the endpoint API related to the OTel exporter. (#1401)
+- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
+- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
+- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
+- `SamplingResult` is now passed a `Tracestate` from the parent `SpanContext` (#1432)
+- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+  - `WithExporter()` and `Start()` to configure Push behavior
+  - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+  - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
+- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shut down the underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code to determine the error status it reports. (#1328)
+- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)
+
+### Fixed
+
+- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
+- `TraceState` has been added to `SpanContext`. (#1340)
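+
+  A hedged sketch of reading both of the additions above from a context, using today's package names (the helper function is illustrative):
+
+  ```go
+  import (
+      "context"
+      "fmt"
+
+      "go.opentelemetry.io/otel/trace"
+  )
+
+  func logSpanContext(ctx context.Context) {
+      sc := trace.SpanContextFromContext(ctx)
+      if sc.IsValid() {
+          // TraceState travels with the SpanContext it belongs to.
+          fmt.Println(sc.TraceID(), sc.TraceState().String())
+      }
+  }
+  ```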
+- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+  - `ID` has been renamed to `TraceID`.
+  - `IDFromHex` has been renamed to `TraceIDFromHex`.
+  - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+  - supports OTLP v0.6.0 (#1230, #1354)
+  - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
+- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
+  This matches the returned type and fixes misuse of the term metric. (#1240)
+- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
+- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252)
+- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
+- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
+- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
+- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254)
+- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
+- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
+- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
+- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
+- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
+- The `resource.New()` method changes signature to support built-in attributes and functional options, including `telemetry.sdk.*` and
+  `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
+- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
+- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
+- Updated span collection limits for attribute, event and link counts to 1000 (#1318)
+- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
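+
+A hedged sketch of the updated `AddEvent` and `RecordError` call shapes described above (`span` and `err` are illustrative):
+
+```go
+import (
+    "time"
+
+    "go.opentelemetry.io/otel/trace"
+)
+
+// Name first, then variadic EventOptions; the unused context parameter is gone.
+span.AddEvent("cache.miss", trace.WithTimestamp(time.Now()))
+span.RecordError(err, trace.WithTimestamp(time.Now()))
+```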
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+  It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+  `Tracer` and `Span` from the same module should be used in their place. (#1306)
+- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` package's global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309)
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
+- A Baggage API to implement the OpenTelemetry specification. (#1217)
+- Add Shutdown method to sdk/trace/provider, shutting down processors in the order they were registered. (#1227)
+
+### Changed
+
+- Set default propagator to no-op propagator. (#1184)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
+- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
+  They are now `Unset`, `Error`, and `Ok`.
+  They no longer track the gRPC codes. (#1214)
+- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
+- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
+
+### Fixed
+
+- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
+
+### Removed
+
+- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
+- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
+  The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
+- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
+- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
+- Nested array/slice support has been removed. (#1226)
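+
+A hedged sketch of installing the unified `TextMapPropagator` this release introduces, written against today's package layout (these names have since moved from the packages this entry describes):
+
+```go
+import (
+    "go.opentelemetry.io/otel"
+    "go.opentelemetry.io/otel/propagation"
+)
+
+// Compose W3C TraceContext and Baggage into a single TextMapPropagator.
+otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
+    propagation.TraceContext{},
+    propagation.Baggage{},
+))
+```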
+
+## [0.12.0] - 2020-09-24
+
+### Added
+
+- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
+- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
+  This addition was made to conform with our project option conventions. (#1155)
+- Instrumentation library information was added to the Zipkin exporter. (#1119)
+- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
+- More semantic conventions for k8s as resource attributes. (#1167)
+
+### Changed
+
+- Add reconnecting UDP connection type to Jaeger exporter.
+  This change adds a new optional implementation of the UDP conn interface used to detect changes to an agent's host DNS record.
+  It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
+- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
+  This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
+- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
+  This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
+- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
+- Move the `B3` and `TraceContext` propagators from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
+  This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
+- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
+- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
+- Move `tools` package under `internal`. (#1141)
+- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
+  The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
+- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153)
+- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
+- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
+- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
+  recommend the use of `newConfig()` instead of `configure()`. (#1163)
+- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
+- Ensure exported interface types include parameter names and update the
+  Style Guide to reflect this styling rule. (#1172)
+- Don't consider an unset environment variable for resource detection to be an error. (#1170)
+- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
+  `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
+- ValueObserver instruments use LastValue aggregator by default. (#1165)
+- OTLP Metric exporter supports LastValue aggregation. (#1165)
+- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
+- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
+- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
+- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
+- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
+- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
+
+### Removed
+
+- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
+  `go.opentelemetry.io/contrib/propagators/` module. (#1191)
+- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
+
+### Fixed
+
+- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
+- Fix missing shutdown processor in otel-collector example. (#1186)
+- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
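+
+A hedged sketch of this release's renamed surface in today's terms: the sampler renames (`TraceIDRatioBased`, `ParentBased`) and the `TracerProvider` naming all survive in the current SDK (the rate and instrumentation name below are illustrative):
+
+```go
+import (
+    "go.opentelemetry.io/otel"
+    sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// ParentBased defers to the parent's decision; TraceIDRatioBased samples roots.
+tp := sdktrace.NewTracerProvider(
+    sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))),
+)
+otel.SetTracerProvider(tp)
+tracer := otel.Tracer("example/app")
+_ = tracer
+```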
+
+## [0.11.0] - 2020-08-24
+
+### Added
+
+- Support for exporting array-valued attributes via OTLP. (#992)
+- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
+- Support for filtering metric label sets. (#1047)
+- A dimensionality-reducing metric Processor. (#1057)
+- Integration tests for more OTel Collector Attribute types. (#1062)
+- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
+
+### Changed
+
+- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
+- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
+- Rename `api/testharness` to `api/apitest`. (#1049)
+- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
+- Change Metric Processor to merge multiple observations. (#1024)
+- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
+  This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
+- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
+- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042)
+- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
+- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
+- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
+- Unify Callback Function Naming.
+  Rename `*Callback` to `*Func`. (#1061)
+- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
+- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification-compliant `Exporter` interface.
+  This interface still supports the export of `SpanData`, but only as a slice.
+  Implementations are now also required to return any error from `ExportSpans` if one occurs, as well as to implement a `Shutdown` method for exporter clean-up. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
+  If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
+  This change is related to `NewBatchSpanProcessor` not returning an error, which was the only error this function would return. (#1078)
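+
+In today's SDK the same error-free constructor shape holds; a minimal hedged sketch (`exp` stands in for any `SpanExporter`):
+
+```go
+import sdktrace "go.opentelemetry.io/otel/sdk/trace"
+
+bsp := sdktrace.NewBatchSpanProcessor(exp)                        // no error returned
+tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp)) // likewise error-free
+_ = tp
+```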
+
+### Removed
+
+- Duplicate, unused API sampler interface. (#999)
+  Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
+- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
+  This move includes moving the `grpc` example to `go.opentelemetry.io/contrib` as well. (#1027)
+- The `WithSpan` method of the `Tracer` interface.
+  The functionality this method provided was limited compared to what a user can provide themselves.
+  It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
+- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
+  These were holdovers from an approach prior to the TracerProvider design. They were no longer used. (#1077)
+- The `oterror` package. (#1026)
+- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
+
+### Fixed
+
+- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
+- Correct instrumentation version tag in Jaeger exporter. (#1037)
+- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
+- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
+- The `otel-collector` example referenced outdated collector processors. (#1006)
+
+## [0.10.0] - 2020-07-29
+
+This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
+
+### Added
+
+- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
+  These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944)
+- Add propagator option for gRPC instrumentation. (#986)
+- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
+
+### Changed
+
+- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
+  This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
+- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
+  This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
+- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
+- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
+  - `value.Bool` was replaced with `kv.BoolValue`.
+  - `value.Int64` was replaced with `kv.Int64Value`.
+  - `value.Uint64` was replaced with `kv.Uint64Value`.
+  - `value.Float64` was replaced with `kv.Float64Value`.
+  - `value.Int32` was replaced with `kv.Int32Value`.
+  - `value.Uint32` was replaced with `kv.Uint32Value`.
+  - `value.Float32` was replaced with `kv.Float32Value`.
+  - `value.String` was replaced with `kv.StringValue`.
+  - `value.Int` was replaced with `kv.IntValue`.
+  - `value.Uint` was replaced with `kv.UintValue`.
+  - `value.Array` was replaced with `kv.ArrayValue`.
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
+- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
+- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
+- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
+- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
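+
+The `kv` value constructors listed above eventually became today's `attribute` package (see the `otel/label` to `otel/attribute` rename noted under 0.18.0); a hedged sketch of the modern equivalents:
+
+```go
+import "go.opentelemetry.io/otel/attribute"
+
+// Typed constructors replace the old value.*/kv.*Value pairs.
+attrs := []attribute.KeyValue{
+    attribute.Bool("cache.hit", true),
+    attribute.Int64("retry.count", 2),
+    attribute.String("peer.service", "billing"),
+}
+_ = attrs
+```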
+
+### Removed
+
+- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel`, which it was synonymous with. (#970)
+
+### Fixed
+
+- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
+- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
+- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
+- Correct Go language formatting in the README documentation. (#961)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
+- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
+
+## [0.9.0] - 2020-07-20
+
+### Added
+
+- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
+- A Detector to automatically detect resources from an environment variable. (#939)
+- GitHub action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
+- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
+  References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
+
+### Changed
+
+- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
+
+### Removed
+
+- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
+
+## [0.8.0] - 2020-07-09
+
+### Added
+
+- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
+  Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
+- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
+- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
+- Add `peer.service` semantic attribute. (#898)
+- Add database-specific semantic attributes. (#899)
+- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
+- Add HTTP content size semantic conventions. (#905)
+- Include `http.request_content_length` in HTTP request basic attributes. (#905)
+- Add semantic conventions for operating system process resource attribute keys. (#919)
+- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
+
+### Changed
+
+- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
+- Use lowercase header names for B3 Multiple Headers. (#881)
+- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
+  This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
+  If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
+- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
+  Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
+  This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
+- Extend semantic conventions for RPC. (#900)
+- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
+  - `"api/standard".FaaSName` -> `FaaSNameKey`
+  - `"api/standard".FaaSID` -> `FaaSIDKey`
+  - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
+  - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
+
+### Removed
+
+- The `FlagsUnused` trace flag is removed.
+  The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882)
+- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
+  If B3 header keys are needed, [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
+
+### Fixed
+
+- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
+- The B3 propagator now correctly supports sampling-only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
+- The B3 propagator now propagates the debug flag.
+  This removes the behavior of changing the debug flag into a set sampling bit.
+  Instead, this now follows the B3 specification and omits the `X-B3-Sampled` header. (#882)
+- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampled` header when injecting. (#882)
+- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
+- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
+- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
+- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
+- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
+- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
+- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
+- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
+- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
+- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
+  This is in accordance with OpenTelemetry semantic conventions. (#922)
+- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
+- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
+- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
+- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
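+
+The B3 propagator described above now lives in the contrib repository (see the 0.12.0 note above); a hedged sketch of selecting an inject encoding with today's module path:
+
+```go
+import (
+    "go.opentelemetry.io/contrib/propagators/b3"
+    "go.opentelemetry.io/otel"
+)
+
+// Inject the single `b3` header; extraction accepts either encoding.
+otel.SetTextMapPropagator(b3.New(b3.WithInjectEncoding(b3.B3SingleHeader)))
+```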
+
+## [0.7.0] - 2020-06-26
+
+This release implements the v0.5.0 version of the OpenTelemetry specification.
+
+### Added
+
+- The othttp instrumentation now includes default metrics. (#861)
+- This CHANGELOG file to track all changes in the project going forward.
+- Support for array type attributes. (#798)
+- Apply transitive dependabot go.mod dependency updates as part of a new automatic GitHub workflow. (#844)
+- Timestamps are now passed to exporters for each export. (#835)
+- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
+  This replaces the prior `Record` `struct` use for this purpose. (#835)
+- New dependabot integration to automate package upgrades. (#814)
+- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument.
+  This instrumentation version is passed on to exporters. (#811) (#805) (#802)
+- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
+- Environment variables for Jaeger exporter are supported. (#796)
+- New `aggregation.Kind` in the export metric API. (#808)
+- New example that uses OTLP and the collector. (#790)
+- Handle errors in the span `SetName` during span initialization. (#791)
+- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
+- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
+- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
+  There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
+- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
+- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
+- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
+
+### Changed
+
+- Rename `Integrator` to `Processor` in the metric SDK. (#863)
+- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
+- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
+- Rename `simple` integrator to `basic` integrator. (#857)
+- Merge otlp collector examples. (#841)
+- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
+  With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
+- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
+- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+  All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
+  Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
+- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
+- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
+- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
+- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
+- Update error handling for any one-off error handlers, replacing them, instead, with the `global.Handle` function. (#791)
+- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
+- Make the argument order to Histogram and DDSketch `New()` consistent. (#781)
+
+### Removed
+
+- `Uint64NumberKind` and related functions from the API. (#864)
+- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
+- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
+
+### Fixed
+
+- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
+- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
+- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
+- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817)
+- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828)
+- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829)
+- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823)
+- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839)
+- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843)
+- Set span status from HTTP status code in the othttp instrumentation. (#832)
+- Fixed typo in push controller comment. (#834)
+- The `Aggregator` testing has been updated and cleaned. (#812)
+- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
+- Fixed `global` `handler_test.go` test failure. (#804)
+- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
+- Fixed OTLP example's accidental early close of exporter. (#807)
+- Ensure zipkin exporter reads and closes response body. (#788)
+- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
+- Clean up tools and RELEASING documentation. (#762)
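+
+The `oterror` `Handler` and the `global` `Handle` helper introduced in this release survive, renamed, in the root `otel` package today; a hedged sketch using the current names:
+
+```go
+import (
+    "errors"
+    "log"
+
+    "go.opentelemetry.io/otel"
+)
+
+// Route all otel-internal errors through a custom handler.
+otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
+    log.Printf("opentelemetry: %v", err)
+}))
+otel.Handle(errors.New("export failed")) // delivered to the handler above
+```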
+
+## [0.6.0] - 2020-05-21
+
+### Added
+
+- Support for `Resource`s in the prometheus exporter. (#757)
+- New pull controller. (#751)
+- New `UpDownSumObserver` instrument. (#750)
+- OpenTelemetry collector demo. (#711)
+- New `SumObserver` instrument. (#747)
+- New `UpDownCounter` instrument. (#745)
+- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
+- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731)
+
+### Changed
+
+- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
+- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
+- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
+- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
+- The prometheus exporter now uses the new pull controller. (#751)
+- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
+- Support use of synchronous instruments in asynchronous callbacks (#725)
+- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
+- Rename `Observer` instrument to `ValueObserver`. (#734)
+- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
+- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
+
+### Fixed
+
+- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
+- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
+- Fix `string` case in `kv` `Infer` function. (#746)
+- Fix panic in grpctrace client interceptors. (#740)
+- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
+- Rewrite span batch process queue batching logic. (#719)
+- Remove the push controller named Meter map. (#738)
+- Fix Histogram aggregator initial state (fix #735). (#736)
+- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
+- Added test for grpctrace `UnaryInterceptorClient`. (#695)
+- Rearrange `api/metric` code layout. (#724)
+
+## [0.5.0] - 2020-05-13
+
+### Added
+
+- Batch `Observer` callback support. (#717)
+- Alias `api` types to root package of project. (#696)
+- Create basic `othttp.Transport` for simple client instrumentation. (#678)
+- `SetAttribute(string, interface{})` to the trace API. (#674)
+- Jaeger exporter option that allows user to specify custom http client. (#671)
+- `Stringer` and `Infer` methods to `key`s. (#662)
+
+### Changed
+
+- Rename `NewKey` in the `kv` package to just `Key`. (#721)
+- Move `core` and `key` to `kv` package. (#720)
+- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
+- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
+- Move `Number` from `core` to `api/metric` package. (#706)
+- Move `SpanContext` from `core` to `trace` package. (#692)
+- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
+
+### Fixed
+
+- Update tooling to run generators in all submodules. (#705)
+- gRPC interceptor regexp to match methods without a service name. (#683)
+- Use a `const` for padding 64-bit B3 trace IDs. (#701)
+- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
+- Left-pad 64-bit B3 trace IDs with zero. (#698)
+- Propagate at least the first W3C tracestate header. (#694)
+- Remove internal `StateLocker` implementation. (#688)
+- Increase instance size CI system uses. (#690)
+- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
+- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
+- Reimplement histogram using mutex instead of `StateLocker`. (#669)
+- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
+- Update documentation to not include any references to `WithKeys`. (#672)
+- Correct misspelling. (#668)
+- Fix clobbering of the span context if extraction fails. (#656)
+- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
+
+## [0.4.3] - 2020-04-24
+
+### Added
+
+- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
+- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
+- New `api/label` package, providing common label set implementation. (#651)
+- Support for JSON marshaling of `Resources`. (#654)
+- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
+- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
+- `WithSpanFormatter` option to the othttp plugin. (#617)
+- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
+- The prometheus exporter now supports exporting histograms. (#601)
+- A `String` method to the `Resource` to return a hashable identifier for a now-unique resource. (#613)
+- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
+- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
+- An iterable structure (`AttributeIterator`) for `Resource` attributes.
+
+### Changed
+
+- The Zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
+- Pass `Resources` through the metrics export pipeline. (#659)
+
+### Removed
+
+- `WithKeys` option from the metric API. (#639)
+
+### Fixed
+
+- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
+- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
+- Use type names for return values in jaeger exporter. (#648)
+- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
+- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
+- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
+- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
+- Add error wrapping to the prometheus exporter. (#631)
+- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
+- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
+- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
+- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
+- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
+- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610)
+- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
+
+## [0.4.2] - 2020-03-31
+
+### Fixed
+
+- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
+- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
+
+## [0.4.1] - 2020-03-31
+
+### Fixed
+
+- Update `tag.sh` to create signed tags. (#604)
+
+## [0.4.0] - 2020-03-30
+
+### Added
+
+- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
+- Script to verify examples after a new release. (#579)
+
+### Removed
+
+- The dogstatsd exporter due to lack of support.
+  This additionally removes support for statsd. (#591)
+- `LabelSet` from the metric API.
+  This is replaced by a `[]core.KeyValue` slice. (#595)
+- `Labels` from the metric API's `Meter` interface. (#595)
+
+### Changed
+
+- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
+- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
+- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
+
+### Fixed
+
+- Corrected missing return in mock span. (#582)
+- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
+- Update to v0.3.0 of the OTLP protocol in the OTLP exporter. (#588)
+- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
+- Add a `RecordBatch` benchmark. (#594)
+- Moved span transforms of the OTLP exporter to the internal package. (#593)
+- Build both go-1.13 and go-1.14 in CircleCI to test for all supported versions of Go. (#569)
+- Removed unneeded allocation on empty labels in OTLP exporter. (#597)
+- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
+- Update project documentation godoc.org links to pkg.go.dev. (#602)
+
+## [0.3.0] - 2020-03-21
+
+This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
+There is still a possibility of breaking changes.
+
+### Added
+
+- Add `Observer` metric instrument. (#474)
+- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
+- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
+- The zipkin trace exporter. (#495)
+- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
+- Add `StatusMessage` field to the trace `Span`. (#524)
+- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+  Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+  The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and update changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+  The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+  This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
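+
+The `Resource` type introduced here is still how service identity is attached to telemetry; a hedged sketch using today's package paths (the semconv version below is an assumption, pick the one your module pins):
+
+```go
+import (
+    "go.opentelemetry.io/otel/sdk/resource"
+    sdktrace "go.opentelemetry.io/otel/sdk/trace"
+    semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
+)
+
+// Attach a service.name attribute to everything this provider exports.
+res := resource.NewWithAttributes(semconv.SchemaURL,
+    semconv.ServiceName("checkout"),
+)
+tp := sdktrace.NewTracerProvider(sdktrace.WithResource(res))
+_ = tp
+```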
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+  The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+  This resulted in all subsequent releases not becoming the default latest.
+  A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+  Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+  Consequentially, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
(#492) +- Fix a possible nil-dereference crash. (#478) +- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) +- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484) +- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) +- Initialize `onError` based on `Config` in prometheus exporter. (#486) +- Correct module name in prometheus exporter README. (#475) +- Removed tracer name prefix from span names. (#430) +- Fix `aggregator_test.go` import package comment. (#431) +- Improved detail in stdout exporter. (#436) +- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442) +- Reorder the Makefile targets within the `precommit` target so files are generated and the code is built before linting, which gives much nicer compiler errors for syntax problems. (#442) +- Reword function documentation in gRPC plugin. (#446) +- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441) +- Fix `metadataSupplier` in the jaeger exporter to overwrite an existing header instead of appending to it. (#441) +- Upgraded to Go 1.13 in CI. (#465) +- Correct opentelemetry.io URL in trace SDK documentation. (#464) +- Refactored reference counting logic in SDK determination of stale records. (#468) +- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) + +## [0.2.1.1] - 2020-01-13 + +### Fixed + +- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) + +## [0.2.1] - 2020-01-08 + +### Added + +- Global meter forwarding implementation. + This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) +- Global trace forwarding implementation. + This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) +- Standardize export pipeline creation in all exporters. (#395) +- Testing, organization, and comments for 64-bit field alignment. (#418) +- Script to tag all modules in the project. (#414) + +### Changed + +- Renamed `propagation` package to `propagators`. (#362) +- Renamed `B3Propagator` propagator to `B3`. (#362) +- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) +- Renamed `BinaryPropagator` propagator to `Binary`. (#362) +- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) +- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) +- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) +- Renamed `SpanOption` to `StartOption` in the trace API. (#369) +- Renamed `StartOptions` to `StartConfig` in the trace API. (#369) +- Renamed `EndOptions` to `EndConfig` in the trace API. (#369) +- `Number` now has a pointer receiver for its methods. (#375) +- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) +- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) +- Renamed `Message` in Event to `Name` in the trace API. (#389) +- Prometheus exporter no longer aggregates metrics; instead it only exports them. (#385) +- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) +- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API.
(#400) +- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) +- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) +- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400) +- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) +- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) +- Renamed the `File` option in the stdout exporter to `Writer`. (#404) +- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. + +### Fixed + +- Aggregator import path corrected. (#421) +- Correct links in README. (#368) +- The README was updated to match latest code changes in its examples. (#374) +- Don't capitalize error statements. (#375) +- Fix ignored errors. (#375) +- Fix ambiguous variable naming. (#375) +- Removed unnecessary type casting. (#375) +- Use named parameters. (#375) +- Updated release schedule. (#378) +- Correct http-stackdriver example module name. (#394) +- Removed the `http.request` span in `httptrace` package. (#397) +- Add comments in the metrics SDK. (#399) +- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403) +- Add documentation of compatible exporters in the README. (#405) +- Typo fix. (#408) +- Simplify span check logic in SDK tracer implementation. (#419) + +## [0.2.0] - 2019-12-03 + +### Added + +- Unary gRPC tracing example. (#351) +- Prometheus exporter. (#334) +- Dogstatsd metrics exporter. (#326) + +### Changed + +- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) +- Rename `GetMeter` to `Meter`. (#357) +- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) +- Rename `HTTPB3Propagator` to `B3Propagator`. (#355) +- Move `/global` package to `/api/global`. (#356) +- Rename `GetTracer` to `Tracer`. (#347) + +### Removed + +- `SetAttribute` from the `Span` interface in the trace API. (#361) +- `AddLink` from the `Span` interface in the trace API. (#349) +- `Link` from the `Span` interface in the trace API. (#349) + +### Fixed + +- Exclude example directories from coverage report. (#365) +- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) +- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359) +- Run the race checker for all tests. (#354) +- Redundant commands in the Makefile are removed. (#354) +- Split the `generate` and `lint` targets of the Makefile. (#354) +- Rename the `circle-ci` target to the more generic `ci` in the Makefile. (#354) +- Add example Prometheus binary to gitignore. (#358) +- Support negative numbers with the `MaxSumCount`. (#335) +- Resolve race conditions in `push_test.go` identified in #339. (#340) +- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) +- Trace benchmark now tests both `AlwaysSample` and `NeverSample`. + Previously it was testing `AlwaysSample` twice. (#325) +- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test.
(#325) +- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325) +- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. + This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. + This was corrected. (#333) + +## [0.1.2] - 2019-11-18 + +### Fixed + +- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328) +- Removed unnecessary unslicing of parameters that are already a slice. (#324) + +## [0.1.1] - 2019-11-18 + +This release contains a Metrics SDK with a stdout exporter and supports basic aggregations such as counter, gauge, array, maxsumcount, and ddsketch. + +### Added + +- Metrics stdout export pipeline. (#265) +- Array aggregation for raw measure metrics. (#282) +- The `core.Value` type now has a `MarshalJSON` method. (#281) + +### Removed + +- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) +- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) + +### Changed + +- Allocation in LabelSet construction to reduce GC overhead. (#318) +- `trace.WithAttributes` to append values instead of replacing them. (#315) +- Use a formula for tolerance in sampling tests. (#298) +- Move export types into trace and metric-specific sub-directories. (#289) +- `SpanKind` back to being based on an `int` type. (#288) + +### Fixed + +- URL to OpenTelemetry website in README. (#323) +- Name of othttp default tracer. (#321) +- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) +- CI modules cache to correctly restore/save from/to the cache. (#316) +- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) +- README now reflects the new code structure introduced with these changes. (#291) +- Make the basic example work. (#279) + +## [0.1.0] - 2019-11-04 + +This is the first release of the OpenTelemetry Go library. +It contains the API and SDK for traces and metrics. + +### Added + +- Initial OpenTelemetry trace and metric API prototypes. +- Initial OpenTelemetry trace, metric, and export SDK packages. +- A wireframe bridge to support compatibility with OpenTracing. +- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. +- Exporters for Jaeger, Stackdriver, and stdout. +- Propagators for binary, B3, and trace-context protocols. +- Project information and guidelines in the form of a README and CONTRIBUTING. +- Tools to build the project and a Makefile to automate the process. +- Apache-2.0 license. +- CircleCI build CI manifest files. +- CODEOWNERS file to track owners of this project.
+ +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.22.0...HEAD +[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0 +[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 +[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 +[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 +[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 +[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 +[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 +[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 +[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 +[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 +[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 +[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 +[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 +[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 +[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 +[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 +[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 +[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 +[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 +[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 +[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 +[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 +[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 +[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 +[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 +[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 +[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 +[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 +[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 +[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 +[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 +[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 +[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 +[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 +[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 +[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 +[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 + +[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric +[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS new file mode 100644 index 000000000..623740007 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -0,0 +1,17 @@ +##################################################### +# +# List of approvers for this 
repository +# +##################################################### +# +# Learn about membership in OpenTelemetry community: +# https://github.com/open-telemetry/community/blob/main/community-membership.md +# +# +# Learn about CODEOWNERS file format: +# https://help.github.com/en/articles/about-code-owners +# + +* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu + +CODEOWNERS @MrAlias @MadVikingGod @pellared \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md new file mode 100644 index 000000000..31857a617 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -0,0 +1,645 @@ +# Contributing to opentelemetry-go + +The Go special interest group (SIG) meets regularly. See the +OpenTelemetry +[community](https://github.com/open-telemetry/community#golang-sdk) +repo for information on this and other language SIGs. + +See the [public meeting +notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit) +for a summary description of past meetings. To request edit access, +join the meeting or get in touch on +[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). + +## Development + +You can view and edit the source code by cloning this repository: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go.git +``` + +Run `make test` instead of `go test` to run the tests. + +There are some generated files checked into the repo. To make sure +that the generated files are up-to-date, run `make` (or `make +precommit` - the `precommit` target is the default). + +The `precommit` target also fixes the formatting of the code and +checks the status of the go module files. + +Additionally, there is a `codespell` target that checks for common +typos in the code. It is not run by default, but you can run it +manually with `make codespell`. It will set up a virtual environment +in `venv` and install `codespell` there. + +If after running `make precommit` the output of `git status` contains +`nothing to commit, working tree clean`, then everything +is up-to-date and properly formatted. + +## Pull Requests + +### How to Send Pull Requests + +Everyone is welcome to contribute code to `opentelemetry-go` via +GitHub pull requests (PRs). + +To create a new PR, fork the project in GitHub and clone the upstream +repo: + +```sh +go get -d go.opentelemetry.io/otel +``` + +(This may print some warning about "build constraints exclude all Go +files", just ignore it.) + +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You +can alternatively use `git` directly with: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go +``` + +(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - +that name is a kind of a redirector to GitHub that `go get` can +understand, but `git` does not.) + +This would put the project in the `opentelemetry-go` directory in the +current working directory. + +Enter the newly created directory and add your fork as a new remote: + +```sh +git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go +``` + +Check out a new branch, make modifications, run linters and tests, update +`CHANGELOG.md`, and push the branch to your fork: + +```sh +git checkout -b <YOUR_BRANCH_NAME> +# edit files +# update changelog +make precommit +git add -p +git commit +git push <YOUR_FORK> <YOUR_BRANCH_NAME> +``` + +Open a pull request against the main `opentelemetry-go` repo.
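One way to do this from the terminal is with the GitHub CLI. A minimal sketch, assuming `gh` is installed and authenticated (an optional convenience; this guide does not require any particular tool): + +```sh +# Open a PR from the current branch against the upstream default branch, +# pre-filling the title and body from the branch's commits. +gh pr create --repo open-telemetry/opentelemetry-go --fill +``` + +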
Be sure to add the pull +request ID to the entry you added to `CHANGELOG.md`. + +Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request. +Rewriting Git history makes it difficult to keep track of iterations during code review. +All pull requests are squashed to a single commit upon merge to `main`. + +### How to Receive Comments + +* If the PR is not ready for review, please put `[WIP]` in the title, + tag it as `work-in-progress`, or mark it as + [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). +* Make sure the CLA is signed and CI is passing. + +### How to Get PRs Merged + +A PR is considered **ready to merge** when: + +* It has received two qualified approvals[^1]. + + This is not enforced through automation, but needs to be validated by the + maintainer merging. + * The qualified approvals need to be from [Approver]s/[Maintainer]s + affiliated with different companies. Two qualified approvals from + [Approver]s or [Maintainer]s affiliated with the same company count as a + single qualified approval. + * PRs introducing changes that have already been discussed and consensus + reached only need one qualified approval. The discussion and resolution + need to be linked to the PR. + * Trivial changes[^2] only need one qualified approval. + +* All feedback has been addressed. + * All PR comments and suggestions are resolved. + * All GitHub Pull Request reviews with a status of "Request changes" have + been addressed. Another review by the objecting reviewer with a different + status can be submitted to clear the original review, or the review can be + dismissed by a [Maintainer] when the issues from the original review have + been addressed. + * Any comments or reviews that cannot be resolved between the PR author and + reviewers can be submitted to the community [Approver]s and [Maintainer]s + during the weekly SIG meeting. If consensus is reached among the + [Approver]s and [Maintainer]s during the SIG meeting the objections to the + PR may be dismissed or resolved or the PR closed by a [Maintainer]. + * Any substantive changes to the PR require existing Approval reviews be + cleared unless the approver explicitly states that their approval persists + across changes. This includes changes resulting from other feedback. + [Approver]s and [Maintainer]s can help in clearing reviews and they should + be consulted if there are any questions. + +* The PR branch is up to date with the base branch it is merging into. + * To ensure this does not block the PR, it should be configured to allow + maintainers to update it. + +* It has been open for review for at least one working day. This gives people + reasonable time to review. + * Trivial changes[^2] do not have to wait for one day and may be merged with + a single [Maintainer]'s approval. + +* All required GitHub workflows have succeeded. +* An urgent fix can be an exception to these criteria as long as it has been + actively communicated among [Maintainer]s. + +Any [Maintainer] can merge the PR once the above criteria have been met. + +[^1]: A qualified approval is a GitHub Pull Request review with "Approve" + status from an OpenTelemetry Go [Approver] or [Maintainer]. +[^2]: Trivial changes include: typo corrections, cosmetic non-substantive + changes, documentation corrections or updates, dependency updates, etc. + +## Design Choices + +As with other OpenTelemetry clients, opentelemetry-go follows the +[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
+ +It's especially valuable to read through the [library +guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). + +### Focus on Capabilities, Not Structure Compliance + +OpenTelemetry is an evolving specification, one where the desires and +use cases are clear, but the methods to satisfy those use cases are +not. + +As such, contributions should provide functionality and behavior that +conforms to the specification, but the interface and structure are +flexible. + +It is preferable to have contributions follow the idioms of the +language rather than conform to specific API names or argument +patterns in the spec. + +For a deeper discussion, see +[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). + +## Documentation + +Each (non-internal, non-test) package must be documented using +[Go Doc Comments](https://go.dev/doc/comment), +preferably in a `doc.go` file. + +Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples) +instead of putting code snippets in Go doc comments. +In some cases, you can even create [Testable Examples](https://go.dev/blog/examples). + +You can install and run a "local Go Doc site" in the following way: + + ```sh + go install golang.org/x/pkgsite/cmd/pkgsite@latest + pkgsite + ``` + +[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) +is an example of a very well-documented package. + +## Style Guide + +One of the primary goals of this project is that it is actually used by +developers. With this goal in mind the project strives to build +user-friendly and idiomatic Go code adhering to the Go community's best +practices. + +For a non-comprehensive but foundational overview of these best practices +the [Effective Go](https://golang.org/doc/effective_go.html) documentation +is an excellent starting place. + +As a convenience for developers building this project, running `make precommit` +will format, lint, validate, and in some cases fix the changes you plan to +submit. This check will need to pass for your changes to be merged. + +In addition to idiomatic Go, the project has adopted certain standards for +implementations of common patterns. These standards should be followed as a +default, and if they are not followed documentation needs to be included as +to the reasons why. + +### Configuration + +When creating an instantiation function for a complex `type T struct`, it is +useful to allow a variable number of options to be applied. However, the strong +type system of Go restricts the function design options. There are a few ways +to solve this problem, but we have landed on the following design. + +#### `config` + +Configuration should be held in a `struct` named `config`, or prefixed with the +specific type name this configuration applies to if there are multiple +`config` types in the package. This type must contain configuration options. + +```go +// config contains configuration options for a thing. +type config struct { + // options ... +} +``` + +In general the `config` type will not need to be used outside of the +package and should be unexported. If, however, it is expected that the user +will likely want to build custom options for the configuration, the `config` +should be exported. Please include in the documentation for the `config` +how the user can extend the configuration. + +It is important that internal `config` are not shared across package boundaries, +meaning a `config` from one package should not be directly used by another.
The +one exception is the API packages. The configs from the base API, e.g. +`go.opentelemetry.io/otel/trace.TracerConfig` and +`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed +by the SDK, and therefore it is expected that these are exported. + +When a config is exported we want to maintain forward and backward +compatibility; to achieve this, no fields should be exported, but they should +instead be accessed by methods. + +Optionally, it is common to include a `newConfig` function (with the same +naming scheme). This function wraps any default settings and loops over +all options to create a configured `config`. + +```go +// newConfig returns an appropriately configured config. +func newConfig(options ...Option) config { + // Set default values for config. + config := config{/* […] */} + for _, option := range options { + config = option.apply(config) + } + // Perform any validation here. + return config +} +``` + +If validation of the `config` options is also performed, this function can +return an error that is expected to be handled by the instantiation function +or propagated to the user. + +Given the design goal of not having the user need to work with the `config`, +the `newConfig` function should also be unexported. + +#### `Option` + +To set the value of the options a `config` contains, a corresponding +`Option` interface type should be used. + +```go +type Option interface { + apply(config) config +} +``` + +Having `apply` unexported makes sure that it will not be used externally. +Moreover, the interface becomes sealed so the user cannot easily implement +the interface on their own. + +The `apply` method should return a modified version of the passed config. +This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. + +The name of the interface should be prefixed in the same way the +corresponding `config` is (if at all). + +#### Options + +All user configurable options for a `config` must have a related unexported +implementation of the `Option` interface and an exported configuration +function that wraps this implementation. + +The wrapping function name should be prefixed with `With*` (or in the +special case of boolean options `Without*`) and should have the following +function signature. + +```go +func With*(…) Option { … } +``` + +##### `bool` Options + +```go +type defaultFalseOption bool + +func (o defaultFalseOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithOption sets a T to have an option included. +func WithOption() Option { + return defaultFalseOption(true) +} +``` + +```go +type defaultTrueOption bool + +func (o defaultTrueOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithoutOption sets a T to have the Bool option excluded. +func WithoutOption() Option { + return defaultTrueOption(false) +} +``` + +##### Declared Type Options + +```go +type myTypeOption struct { + MyType MyType +} + +func (o myTypeOption) apply(c config) config { + c.MyType = o.MyType + return c +} + +// WithMyType sets the MyType option for a T. +func WithMyType(t MyType) Option { + return myTypeOption{t} +} +``` + +##### Functional Options + +```go +type optionFunc func(config) config + +func (fn optionFunc) apply(c config) config { + return fn(c) +} + +// WithMyType sets t as the MyType option.
+func WithMyType(t MyType) Option { + return optionFunc(func(c config) config { + c.MyType = t + return c + }) +} +``` + +#### Instantiation + +Use this configuration pattern to configure instantiation with a `NewT` +function. + +```go +func NewT(options ...Option) T {…} +``` + +Any required parameters can be declared before the variadic `options`. + +#### Dealing with Overlap + +Sometimes there are multiple complex `struct` types that share common +configuration and also have distinct configuration. To avoid repeated +portions of `config`s, a common `config` can be used with the union of +options being handled with the `Option` interface. + +For example: + +```go +// config holds options for all animals. +type config struct { + Weight float64 + Color string + MaxAltitude float64 +} + +// DogOption applies Dog-specific options. +type DogOption interface { + applyDog(config) config +} + +// BirdOption applies Bird-specific options. +type BirdOption interface { + applyBird(config) config +} + +// Option applies options for all animals. +type Option interface { + BirdOption + DogOption +} + +type weightOption float64 + +func (o weightOption) applyDog(c config) config { + c.Weight = float64(o) + return c +} + +func (o weightOption) applyBird(c config) config { + c.Weight = float64(o) + return c +} + +func WithWeight(w float64) Option { return weightOption(w) } + +type furColorOption string + +func (o furColorOption) applyDog(c config) config { + c.Color = string(o) + return c +} + +func WithFurColor(c string) DogOption { return furColorOption(c) } + +type maxAltitudeOption float64 + +func (o maxAltitudeOption) applyBird(c config) config { + c.MaxAltitude = float64(o) + return c +} + +func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } + +func NewDog(name string, o ...DogOption) Dog {…} +func NewBird(name string, o ...BirdOption) Bird {…} +``` + +### Interfaces + +To allow other developers to better comprehend the code, it is important +to ensure it is sufficiently documented. One simple measure that contributes +to this aim is self-documenting code, such as naming method parameters. Therefore, +where appropriate, methods of every exported interface type should have +their parameters appropriately named. + +#### Interface Stability + +All exported stable interfaces that include the following warning in their +documentation are allowed to be extended with additional methods. + +> Warning: methods may be added to this interface in minor releases. + +These interfaces are defined by the OpenTelemetry specification and will be +updated as the specification evolves. + +Otherwise, stable interfaces MUST NOT be modified. + +#### How to Change Specification Interfaces + +When an API change must be made, we will update the SDK with the new method one +release before the API change. This will allow the SDK one version before the +API change to work seamlessly with the new API. + +If an incompatible version of the SDK is used with the new API the application +will fail to compile. + +#### How Not to Change Specification Interfaces + +We have explored using a v2 of the API to change interfaces and found that there +was no way to introduce a v2 and have it work seamlessly with the v1 of the API. +Problems happened with libraries that upgraded to v2 when an application did not, +and would not produce any telemetry. + +More detail of the approaches considered and their limitations can be found in +the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920) +issue.
+ +#### How to Change Other Interfaces + +If new functionality is needed for an interface that cannot be changed it MUST +be added by including an additional interface. That added interface can be a +simple interface for the specific functionality that you want to add or it can +be a super-set of the original interface. For example, if you wanted to add a +`Close` method to the `Exporter` interface: + +```go +type Exporter interface { + Export() +} +``` + +A new interface, `Closer`, can be added: + +```go +type Closer interface { + Close() +} +``` + +Code that is passed the `Exporter` interface can now check to see if the passed +value also satisfies the new interface. E.g. + +```go +func caller(e Exporter) { + /* ... */ + if c, ok := e.(Closer); ok { + c.Close() + } + /* ... */ +} +``` + +Alternatively, a new type that is the super-set of an `Exporter` can be created. + +```go +// ClosingExporter embeds an Exporter and adds a Close method. +type ClosingExporter struct { + Exporter +} + +func (e ClosingExporter) Close() { /* ... */ } +``` + +This new type can be used similarly to the simple interface above in that a +passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type +and the `Close` method called. + +This super-set approach can be useful if there is explicit behavior that needs +to be coupled with the original type and passed as a unified type to a new +function, but, because of this coupling, it also limits the applicability of +the added functionality. If there exist other interfaces where this +functionality should be added, each one will need its own super-set +interface and will duplicate the pattern. For this reason, the simple targeted +interface that defines the specific functionality should be preferred. + +### Testing + +The tests should never leak goroutines. + +Use the term `ConcurrentSafe` in the test name when it aims to verify the +absence of race conditions. + +### Internal packages + +The use of internal packages should be scoped to a single module. A sub-module +should never import from a parent internal package. This creates a coupling +between the two modules where a user can upgrade the parent without the child, +and if the internal package API has changed the upgrade will fail[^3]. + +There are two known exceptions to this rule: + +- `go.opentelemetry.io/otel/internal/global` + - This package manages global state for all of opentelemetry-go. It needs to + be a single package in order to ensure the uniqueness of the global state. +- `go.opentelemetry.io/otel/internal/baggage` + - This package provides values in a `context.Context` that need to be + recognized by `go.opentelemetry.io/otel/baggage` and + `go.opentelemetry.io/otel/bridge/opentracing` but remain private. + +If you have duplicate code in multiple modules, make that code into a Go +template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl] +to render the templates in the desired locations. See [#4404] for an example of +this. + +[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548 + +### Ignoring context cancellation + +OpenTelemetry API implementations need to ignore the cancellation of the contexts that are +passed when recording a value (e.g. starting a span, recording a measurement, emitting a log). +Recording methods should not return an error describing the cancellation state of the context +when they complete, nor should they abort any work. + +This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
the method.
In that case the context cancellation can be used for the timeout with the +restriction that this behavior is documented for the method. Otherwise, timeouts +are expected to be handled by the user calling the API, not the implementation. + +Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method +of a provider. It is assumed the context passed from a user is not used for this purpose. + +Outside of the direct recording of telemetry from the API (e.g. exporting telemetry, +force flushing telemetry, shutting down a signal provider) the context cancellation +should be honored. This means all work done on behalf of the user provided context +should be canceled. + +## Approvers and Maintainers + +### Approvers + +- [Evan Torrie](https://github.com/evantorrie), Verizon Media +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics +- [David Ashpole](https://github.com/dashpole), Google +- [Chester Cheung](https://github.com/hanyuancheung), Tencent +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS + +### Maintainers + +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Robert Pająk](https://github.com/pellared), Splunk +- [Tyler Yahn](https://github.com/MrAlias), Splunk + +### Emeritus + +- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Josh MacDonald](https://github.com/jmacd), LightStep + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). + +[Approver]: #approvers +[Maintainer]: #maintainers +[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl +[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 000000000..35fc18996 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,318 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . -name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default +ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +DBOTCONF = $(TOOLS)/dbotconf +$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOJQ = $(TOOLS)/gojq +$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + +GOTMPL = $(TOOLS)/gotmpl +$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl + +GORELEASE = $(TOOLS)/gorelease +$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease + +GOVULNCHECK = $(TOOLS)/govulncheck +$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck + +.PHONY: tools +tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) + +# Virtualized python tools via docker + +# The directory where the virtual environment is created. +VENVDIR := venv + +# The directory where the python tools are installed. +PYTOOLS := $(VENVDIR)/bin + +# The pip executable in the virtual environment. +PIP := $(PYTOOLS)/pip + +# The directory in the docker image where the current directory is mounted. +WORKDIR := /workdir + +# The python image to use for the virtual environment. +PYTHONIMAGE := python:3.11.3-slim-bullseye + +# Run the python image with the current directory mounted. +DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) + +# Create a virtual environment for Python tools. +$(PYTOOLS): +# The `--upgrade` flag is needed to ensure that the virtual environment is +# created with the latest pip version. + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + +# Install python packages into the virtual environment. 
+$(PYTOOLS)/%: | $(PYTOOLS) + @$(DOCKERPY) $(PIP) install -r requirements.txt + +CODESPELL = $(PYTOOLS)/codespell +$(CODESPELL): PACKAGE=codespell + +# Generate + +.PHONY: generate +generate: go-generate vanity-import-fix + +.PHONY: go-generate +go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) +go-generate/%: DIR=$* +go-generate/%: | $(STRINGER) $(GOTMPL) + @echo "$(GO) generate $(DIR)/..." \ + && cd $(DIR) \ + && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... + +.PHONY: vanity-import-fix +vanity-import-fix: | $(PORTO) + @$(PORTO) --include-internal -w . + +# Generate go.work file for local development. +.PHONY: go-work +go-work: | $(CROSSLINK) + $(CROSSLINK) work --root=$(shell pwd) + +# Build + +.PHONY: build + +build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) +build/%: DIR=$* +build/%: + @echo "$(GO) build $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) build ./... + +build-tests/%: DIR=$* +build-tests/%: + @echo "$(GO) build tests $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null + +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race +.PHONY: $(TEST_TARGETS) test +test-default test-race: ARGS=-race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v -race +$(TEST_TARGETS): test +test: $(OTEL_GO_MOD_DIRS:%=test/%) +test/%: DIR=$* +test/%: + @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) + +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: | $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | grep -v 'semconv/v.*' \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt + +# Adding a directory will include all benchmarks in that directory if a filter is not specified. +BENCHMARK_TARGETS := sdk/trace +.PHONY: benchmark +benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) +BENCHMARK_FILTER = . +# You can override the filter for a particular directory by adding a rule here. +benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark/%: + @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..."
\ + && cd $* \ + $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: | $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: | $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: | crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy -compat=1.20 + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint govulncheck + +.PHONY: vanity-import-check +vanity-import-check: | $(PORTO) + @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) + +.PHONY: misspell +misspell: | $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: govulncheck +govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) +govulncheck/%: DIR=$* +govulncheck/%: | $(GOVULNCHECK) + @echo "govulncheck ./... in $(DIR)" \ + && cd $(DIR) \ + && $(GOVULNCHECK) ./... + +.PHONY: codespell +codespell: | $(CODESPELL) + @$(DOCKERPY) $(CODESPELL) + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +DEPENDABOT_CONFIG = .github/dependabot.yml +.PHONY: dependabot-check +dependabot-check: | $(DBOTCONF) + @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) + +.PHONY: dependabot-generate +dependabot-generate: | $(DBOTCONF) + @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) + [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + +.PHONY: gorelease +gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) +gorelease/%: DIR=$* +gorelease/%:| $(GORELEASE) + @echo "gorelease in $(DIR):" \ + && cd $(DIR) \ + && $(GORELEASE) \ + || echo "" + +.PHONY: prerelease +prerelease: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + +COMMIT ?= "HEAD" +.PHONY: add-tags +add-tags: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + +.PHONY: lint-markdown +lint-markdown: + docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md new file mode 100644 index 000000000..44e1bfc9b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -0,0 +1,108 @@ +# OpenTelemetry-Go + +[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) +[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) + +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). +It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. + +## Project Status + +| Signal | Status | +|---------|------------| +| Traces | Stable | +| Metrics | Stable | +| Logs | Design [1] | + +- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)). + No Logs Pull Requests are currently being accepted. + +Progress and status specific to this repository is tracked in our +[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) +and +[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). + +Project versioning information and stability guarantees can be found in the +[versioning documentation](VERSIONING.md). + +### Compatibility + +OpenTelemetry-Go ensures compatibility with the current supported versions of +the [Go language](https://golang.org/doc/devel/release#policy): + +> Each major Go release is supported until there are two newer major releases. +> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. + +For versions of Go that are no longer supported upstream, opentelemetry-go will +stop ensuring compatibility with these versions in the following manner: + +- A minor release of opentelemetry-go will be made to add support for the new + supported release of Go. 
+- The following minor release of opentelemetry-go will remove compatibility + testing for the oldest (now archived upstream) version of Go. This, and + future, releases of opentelemetry-go may include features only supported by + the currently supported versions of Go. + +Currently, this project supports the following environments. + +| OS | Go Version | Architecture | +|---------|------------|--------------| +| Ubuntu | 1.21 | amd64 | +| Ubuntu | 1.20 | amd64 | +| Ubuntu | 1.21 | 386 | +| Ubuntu | 1.20 | 386 | +| MacOS | 1.21 | amd64 | +| MacOS | 1.20 | amd64 | +| Windows | 1.21 | amd64 | +| Windows | 1.20 | amd64 | +| Windows | 1.21 | 386 | +| Windows | 1.20 | 386 | + +While this project should work for other systems, no compatibility guarantees +are made for those systems currently. + +## Getting Started + +You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/). + +OpenTelemetry's goal is to provide a single set of APIs to capture distributed +traces and metrics from your application and send them to an observability +platform. This project allows you to do just that for applications written in +Go. There are two steps to this process: instrument your application, and +configure an exporter. + +### Instrumentation + +To start capturing distributed traces and metric events from your application +it first needs to be instrumented. The easiest way to do this is by using an +instrumentation library for your code. Be sure to check out [the officially +supported instrumentation +libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). + +If you need to extend the telemetry an instrumentation library provides or want +to build your own instrumentation for your application directly you will need +to use the +[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) +package. The included [examples](./example/) are a good way to see some +practical uses of this process. + +### Export + +Now that your application is instrumented to collect telemetry, it needs an +export pipeline to send that telemetry to an observability platform. + +All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). + +| Exporter | Metrics | Traces | +|---------------------------------------|:-------:|:------:| +| [OTLP](./exporters/otlp/) | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | ✓ | + +## Contributing + +See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md new file mode 100644 index 000000000..d2691d0bd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -0,0 +1,139 @@ +# Release Process + +## Semantic Convention Generation + +New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. +The `semconv-generate` make target is used for this. + +1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag. +2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` +3. Run the `make semconv-generate ...` target from this repository. + +For example, + +```sh +export TAG="v1.21.0" # Change to the release version you are generating. 
+export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
+docker pull otel/semconvgen:latest
+make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease), to ensure that there are no unwanted changes in the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
+Update go.mod for submodules to depend on the new release, which will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+   `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+   ```
+   make prerelease MODSET=<module set>
+   ```
+
+2. Verify the changes.
+
+   ```
+   git diff ...prerelease_<module set>_<new tag>
+   ```
+
+   This should have changed the version for all modules to be `<new tag>`.
+   If these changes look correct, merge them into your pre-release branch:
+
+   ```go
+   git merge prerelease_<module set>_<new tag>
+   ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+     To verify this, you can look directly at the commits since the `<last tag>`.
+
+     ```
+     git --no-pager log --pretty=oneline "<last tag>..HEAD"
+     ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+   Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged, it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+   using the `<commit hash>` of the commit on the main branch for the merged Pull Request.
+
+   ```
+   make add-tags MODSET=<module set> COMMIT=<commit hash>
+   ```
+
+   It should only be necessary to provide an explicit `COMMIT` value if the
+   current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+   Make sure you push all sub-modules as well.
+
+   ```
+   git push upstream <new tag>
+   git push upstream <submodules-new-tag>
+   ...
+   ```
+
+## Release
+
+Finally create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Verify Examples
+
+After releasing, verify that examples build outside of the repository.
+
+```
+./verify_examples.sh
+```
+
+The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
+This ensures they build with the published release, not the local copy.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 000000000..412f1e362
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+  * Versions will comply with [semver
+    2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
+    * New methods may be added to exported API interfaces. All exported
+      interfaces that fall within this exception will include the following
+      paragraph in their public documentation.
+
+      > Warning: methods may be added to this interface in minor releases.
+
+    * If a module is version `v2` or higher, the major version of the module
+      must be included as a `/vN` at the end of the module paths used in
+      `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
+      go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
+      (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
+      paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a
+      `@v2.0.1` in that example. One way to think about it is that the module
+      name now includes the `/v2`, so include `/v2` whenever you are using the
+      module name).
+    * If a module is version `v0` or `v1`, do not include the major version in
+      either the module path or the import path.
+  * Modules will be used to encapsulate signals and components.
+    * Experimental modules still under active development will be versioned at
+      `v0` to imply the stability guarantee defined by
+      [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+      > Major version zero (0.y.z) is for initial development. Anything MAY
+      > change at any time. The public API SHOULD NOT be considered stable.
+
+    * Mature modules for which we guarantee a stable public API will be versioned
+      with a major version greater than `v0`.
+      * The decision to make a module stable will be made on a case-by-case
+        basis by the maintainers of this project.
+    * Experimental modules will start their versioning at `v0.0.0` and will
+      increment their minor version when backwards incompatible changes are
+      released and increment their patch version when backwards compatible
+      changes are released.
+  * All stable modules that use the same major version number will use the
+    same entire version number.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module has not been changed, but rather so
+      that it will remain at the same version as other stable modules that
+      did undergo change.
+    * When an experimental module becomes stable a new stable module version
+      will be released and will include this now stable module. The new
+      stable module version will be an increment of the minor version number
+      and will be applied to all existing stable modules as well as the newly
+      stable module being released.
+* Versioning of the associated [contrib
+  repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
+  this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+  * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+    * If a module is version `v2` or higher, the
+      major version of the module must be included as a `/vN` at the end of the
+      module paths used in `go.mod` files (e.g., `module
+      go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+      go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+      package import path (e.g., `import
+      "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+      the paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
+      is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+      it is that the module name now includes the `/v2`, so include `/v2`
+      whenever you are using the module name).
+    * If a module is version `v0` or `v1`, do not include the major version
+      in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+    * Experimental modules still under active development will be versioned at
+      `v0` to imply the stability guarantee defined by
+      [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+      > Major version zero (0.y.z) is for initial development. Anything MAY
+      > change at any time. The public API SHOULD NOT be considered stable.
+
+    * Mature modules for which we guarantee a stable public API and telemetry will
+      be versioned with a major version greater than `v0`.
+    * Experimental modules will start their versioning at `v0.0.0` and will
+      increment their minor version when backwards incompatible changes are
+      released and increment their patch version when backwards compatible
+      changes are released.
+    * Stable contrib modules cannot depend on experimental modules from this
+      project.
+    * All stable contrib modules of the same major version with this project
+      will use the same entire version as this project.
+      * Stable modules may be released with an incremented minor or patch
+        version even though that module's code has not been changed. Instead
+        the only change that will have been included is to have updated that
+        module's dependency on this project's stable APIs.
+      * When an experimental module in contrib becomes stable a new stable
+        module version will be released and will include this now stable
+        module. The new stable module version will be an increment of the minor
+        version number and will be applied to all existing stable contrib
+        modules, this project's modules, and the newly stable module being
+        released.
+    * Contrib modules will be kept up to date with this project's releases.
+      * Due to the dependency contrib modules will implicitly have on this
+        project's modules, the release of stable contrib modules to match the
+        released version number will be staggered after this project's release.
+        There is no explicit time guarantee for how long after this project's
+        release the contrib release will be. Effort should be made to keep them
+        as close in time as possible.
+      * No additional stable release in this project can be made until the
+        contrib repository has a matching stable release.
+      * No release can be made in the contrib repository after this project's
+        stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+These modules have been developed to a point where the `otel/trace`,
+`otel/baggage`, and `otel/sdk/trace` modules should be considered for a
+stable release. The `otel/metric` and `otel/sdk/metric` modules are still
+under active development and the `otel` module depends on both `otel/trace`
+and `otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable
+release. They are then released as version `v1.1.0` (the minor version is
+incremented to indicate the addition of a new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
similarity index 68%
rename from vendor/github.com/go-openapi/swag/post_go18.go
rename to vendor/go.opentelemetry.io/otel/attribute/doc.go
index f5228b82c..dafe7424d 100644
--- a/vendor/github.com/go-openapi/swag/post_go18.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -1,10 +1,10 @@
-// Copyright 2015 go-swagger maintainers
+// Copyright The OpenTelemetry Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-//    http://www.apache.org/licenses/LICENSE-2.0
+//     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,13 +12,5 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-//go:build go1.8
-// +build go1.8
-
-package swag
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
-	return url.PathUnescape(path)
-}
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 000000000..fe2bc5766
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,146 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"bytes"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	// Encoder is a mechanism for serializing an attribute set into a specific
+	// string representation that supports caching, to avoid repeated
+	// serialization. An example could be an exporter encoding the attribute
+	// set into a wire representation.
+	Encoder interface {
+		// Encode returns the serialized encoding of the attribute set using
+		// its Iterator. This result may be cached by an attribute.Set.
+		Encode(iterator Iterator) string
+
+		// ID returns a value that is unique for each class of attribute
+		// encoder. Attribute encoders allocate these using `NewEncoderID`.
+		ID() EncoderID
+	}
+
+	// EncoderID is used to identify distinct Encoder
+	// implementations, for caching encoded results.
+	EncoderID struct {
+		value uint64
+	}
+
+	// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+	// allocations used in encoding attributes. This implementation encodes a
+	// comma-separated list of key=value, with '\'-escaping of '=', ',', and
+	// '\'.
+	defaultAttrEncoder struct {
+		// pool is a pool of attribute set builders. The buffers in this pool
+		// grow to a size that most attribute encodings will not allocate new
+		// memory.
+		pool sync.Pool // *bytes.Buffer
+	}
+)
+
+// escapeChar is used to ensure uniqueness of the attribute encoding where
+// keys or values contain either '=' or ','. Since there is no parser needed
+// for this encoding and its only requirement is to be unique, this choice is
+// arbitrary. Users will see these in some exporters (e.g., stdout), so the
+// backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+	_ Encoder = &defaultAttrEncoder{}
+
+	// encoderIDCounter is for generating IDs for other attribute encoders.
+	encoderIDCounter uint64
+
+	defaultEncoderOnce     sync.Once
+	defaultEncoderID       = NewEncoderID()
+	defaultEncoderInstance *defaultAttrEncoder
+)
+
+// NewEncoderID returns a unique attribute encoder ID. It should be called
+// once per each type of attribute encoder. Preferably in init() or in var
+// definition.
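+//
+// For example (illustrative; myEncoderID is a hypothetical name):
+//
+//	var myEncoderID = NewEncoderID()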
+func NewEncoderID() EncoderID { + return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} +} + +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. +// +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. +func DefaultEncoder() Encoder { + defaultEncoderOnce.Do(func() { + defaultEncoderInstance = &defaultAttrEncoder{ + pool: sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, + }, + } + }) + return defaultEncoderInstance +} + +// Encode is a part of an implementation of the AttributeEncoder interface. +func (d *defaultAttrEncoder) Encode(iter Iterator) string { + buf := d.pool.Get().(*bytes.Buffer) + defer d.pool.Put(buf) + buf.Reset() + + for iter.Next() { + i, keyValue := iter.IndexedAttribute() + if i > 0 { + _, _ = buf.WriteRune(',') + } + copyAndEscape(buf, string(keyValue.Key)) + + _, _ = buf.WriteRune('=') + + if keyValue.Value.Type() == STRING { + copyAndEscape(buf, keyValue.Value.AsString()) + } else { + _, _ = buf.WriteString(keyValue.Value.Emit()) + } + } + return buf.String() +} + +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { + return defaultEncoderID +} + +// copyAndEscape escapes `=`, `,` and its own escape character (`\`), +// making the default encoding unique. +func copyAndEscape(buf *bytes.Buffer, val string) { + for _, ch := range val { + switch ch { + case '=', ',', escapeChar: + _, _ = buf.WriteRune(escapeChar) + } + _, _ = buf.WriteRune(ch) + } +} + +// Valid returns true if this encoder ID was allocated by +// `NewEncoderID`. Invalid encoder IDs will not be cached. +func (id EncoderID) Valid() bool { + return id.value != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go new file mode 100644 index 000000000..638c213d5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -0,0 +1,60 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Filter supports removing certain attributes from attribute sets. When +// the filter returns true, the attribute will be kept in the filtered +// attribute set. When the filter returns false, the attribute is excluded +// from the filtered attribute set, and the attribute instead appears in +// the removed list of excluded attributes. +type Filter func(KeyValue) bool + +// NewAllowKeysFilter returns a Filter that only allows attributes with one of +// the provided keys. +// +// If keys is empty a deny-all filter is returned. 
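+//
+// For example (illustrative usage; the keys shown are hypothetical):
+//
+//	f := NewAllowKeysFilter("http.method", "http.route")
+//	f(String("http.method", "GET"))   // true: key is allowed
+//	f(String("user.email", "a@b.co")) // false: key is filtered out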
+func NewAllowKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return false } + } + + allowed := make(map[Key]struct{}) + for _, k := range keys { + allowed[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := allowed[kv.Key] + return ok + } +} + +// NewDenyKeysFilter returns a Filter that only allows attributes +// that do not have one of the provided keys. +// +// If keys is empty an allow-all filter is returned. +func NewDenyKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return true } + } + + forbid := make(map[Key]struct{}) + for _, k := range keys { + forbid[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := forbid[kv.Key] + return !ok + } +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 000000000..841b271fb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,161 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. +func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. +func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. +func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. 
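+//
+// Illustrative usage (assumes set is an existing Set):
+//
+//	iter := set.Iter()
+//	attrs := iter.ToSlice() // every attribute in set, sorted by key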
+func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. +func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. +func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 000000000..0656a04e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int(name, value). 
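+//
+// For example (illustrative; the key name is hypothetical):
+//
+//	k := Key("retry.count")
+//	kv := k.Int(3) // same result as Int("retry.count", 3)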
+func (k Key) Int(v int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntValue(v),
+	}
+}
+
+// IntSlice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- IntSlice(name, value).
+func (k Key) IntSlice(v []int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntSliceValue(v),
+	}
+}
+
+// Int64 creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64(name, value).
+func (k Key) Int64(v int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64Value(v),
+	}
+}
+
+// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64Slice(name, value).
+func (k Key) Int64Slice(v []int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64SliceValue(v),
+	}
+}
+
+// Float64 creates a KeyValue instance with a FLOAT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64(name, value).
+func (k Key) Float64(v float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64Value(v),
+	}
+}
+
+// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64Slice(name, value).
+func (k Key) Float64Slice(v []float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64SliceValue(v),
+	}
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- String(name, value).
+func (k Key) String(v string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringValue(v),
+	}
+}
+
+// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- StringSlice(name, value).
+func (k Key) StringSlice(v []string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringSliceValue(v),
+	}
+}
+
+// Defined returns true for non-empty keys.
+func (k Key) Defined() bool {
+	return len(k) != 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
new file mode 100644
index 000000000..1ddf3ce05
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -0,0 +1,86 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"fmt"
+)
+
+// KeyValue holds a key and value pair.
+type KeyValue struct {
+	Key   Key
+	Value Value
+}
+
+// Valid reports whether kv is a valid OpenTelemetry attribute.
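+//
+// For example (illustrative):
+//
+//	String("service.name", "api").Valid() // true
+//	KeyValue{}.Valid()                    // false: empty Key, INVALID Value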
+func (kv KeyValue) Valid() bool { + return kv.Key.Defined() && kv.Value.Type() != INVALID +} + +// Bool creates a KeyValue with a BOOL Value type. +func Bool(k string, v bool) KeyValue { + return Key(k).Bool(v) +} + +// BoolSlice creates a KeyValue with a BOOLSLICE Value type. +func BoolSlice(k string, v []bool) KeyValue { + return Key(k).BoolSlice(v) +} + +// Int creates a KeyValue with an INT64 Value type. +func Int(k string, v int) KeyValue { + return Key(k).Int(v) +} + +// IntSlice creates a KeyValue with an INT64SLICE Value type. +func IntSlice(k string, v []int) KeyValue { + return Key(k).IntSlice(v) +} + +// Int64 creates a KeyValue with an INT64 Value type. +func Int64(k string, v int64) KeyValue { + return Key(k).Int64(v) +} + +// Int64Slice creates a KeyValue with an INT64SLICE Value type. +func Int64Slice(k string, v []int64) KeyValue { + return Key(k).Int64Slice(v) +} + +// Float64 creates a KeyValue with a FLOAT64 Value type. +func Float64(k string, v float64) KeyValue { + return Key(k).Float64(v) +} + +// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. +func Float64Slice(k string, v []float64) KeyValue { + return Key(k).Float64Slice(v) +} + +// String creates a KeyValue with a STRING Value type. +func String(k, v string) KeyValue { + return Key(k).String(v) +} + +// StringSlice creates a KeyValue with a STRINGSLICE Value type. +func StringSlice(k string, v []string) KeyValue { + return Key(k).StringSlice(v) +} + +// Stringer creates a new key-value pair with a passed name and a string +// value generated by the passed Stringer interface. +func Stringer(k string, v fmt.Stringer) KeyValue { + return Key(k).String(v.String()) +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go new file mode 100644 index 000000000..7e6765b06 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "reflect" + "sort" + "sync" +) + +type ( + // Set is the representation for a distinct attribute set. It manages an + // immutable set of attributes, with an internal cache for storing + // attribute encodings. + // + // This type supports the Equivalent method of comparison using values of + // type Distinct. + Set struct { + equivalent Distinct + } + + // Distinct wraps a variable-size array of KeyValue, constructed with keys + // in sorted order. This can be used as a map key or for equality checking + // between Sets. + Distinct struct { + iface interface{} + } + + // Sortable implements sort.Interface, used for sorting KeyValue. This is + // an exported type to support a memory optimization. A pointer to one of + // these is needed for the call to sort.Stable(), which the caller may + // provide in order to avoid an allocation. See NewSetWithSortable(). 
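+	//
+	// For example (illustrative; kvs is an existing []KeyValue):
+	//
+	//	var tmp Sortable
+	//	set := NewSetWithSortable(kvs, &tmp) // reuses tmp as sorting scratch space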
+	Sortable []KeyValue
+)
+
+var (
+	// keyValueType is used in computeDistinctReflect.
+	keyValueType = reflect.TypeOf(KeyValue{})
+
+	// emptySet is returned for empty attribute sets.
+	emptySet = &Set{
+		equivalent: Distinct{
+			iface: [0]KeyValue{},
+		},
+	}
+
+	// sortables is a pool of Sortables used to create Sets when a user does
+	// not provide one.
+	sortables = sync.Pool{
+		New: func() interface{} { return new(Sortable) },
+	}
+)
+
+// EmptySet returns a reference to a Set with no elements.
+//
+// This is a convenience provided for optimized calling utility.
+func EmptySet() *Set {
+	return emptySet
+}
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (d Distinct) reflectValue() reflect.Value {
+	return reflect.ValueOf(d.iface)
+}
+
+// Valid returns true if this value refers to a valid Set.
+func (d Distinct) Valid() bool {
+	return d.iface != nil
+}
+
+// Len returns the number of attributes in this set.
+func (l *Set) Len() int {
+	if l == nil || !l.equivalent.Valid() {
+		return 0
+	}
+	return l.equivalent.reflectValue().Len()
+}
+
+// Get returns the KeyValue at ordered position idx in this set.
+func (l *Set) Get(idx int) (KeyValue, bool) {
+	if l == nil || !l.equivalent.Valid() {
+		return KeyValue{}, false
+	}
+	value := l.equivalent.reflectValue()
+
+	if idx >= 0 && idx < value.Len() {
+		// Note: The Go compiler successfully avoids an allocation for
+		// the interface{} conversion here:
+		return value.Index(idx).Interface().(KeyValue), true
+	}
+
+	return KeyValue{}, false
+}
+
+// Value returns the value of a specified key in this set.
+func (l *Set) Value(k Key) (Value, bool) {
+	if l == nil || !l.equivalent.Valid() {
+		return Value{}, false
+	}
+	rValue := l.equivalent.reflectValue()
+	vlen := rValue.Len()
+
+	idx := sort.Search(vlen, func(idx int) bool {
+		return rValue.Index(idx).Interface().(KeyValue).Key >= k
+	})
+	if idx >= vlen {
+		return Value{}, false
+	}
+	keyValue := rValue.Index(idx).Interface().(KeyValue)
+	if k == keyValue.Key {
+		return keyValue.Value, true
+	}
+	return Value{}, false
+}
+
+// HasValue tests whether a key is defined in this set.
+func (l *Set) HasValue(k Key) bool {
+	if l == nil {
+		return false
+	}
+	_, ok := l.Value(k)
+	return ok
+}
+
+// Iter returns an iterator for visiting the attributes in this set.
+func (l *Set) Iter() Iterator {
+	return Iterator{
+		storage: l,
+		idx:     -1,
+	}
+}
+
+// ToSlice returns the set of attributes belonging to this set, sorted, where
+// keys appear no more than once.
+func (l *Set) ToSlice() []KeyValue {
+	iter := l.Iter()
+	return iter.ToSlice()
+}
+
+// Equivalent returns a value that may be used as a map key. The Distinct type
+// guarantees that the result will equal the equivalent Distinct value of any
+// attribute set with the same elements as this, where sets are made unique by
+// choosing the last value in the input for any given key.
+func (l *Set) Equivalent() Distinct {
+	if l == nil || !l.equivalent.Valid() {
+		return emptySet.equivalent
+	}
+	return l.equivalent
+}
+
+// Equals returns true if the argument set is equivalent to this set.
+func (l *Set) Equals(o *Set) bool {
+	return l.Equivalent() == o.Equivalent()
+}
+
+// Encoded returns the encoded form of this set, according to encoder.
+func (l *Set) Encoded(encoder Encoder) string {
+	if l == nil || encoder == nil {
+		return ""
+	}
+
+	return encoder.Encode(l.Iter())
+}
+
+func empty() Set {
+	return Set{
+		equivalent: emptySet.equivalent,
+	}
+}
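+
+// Illustrative Set construction and lookup (an example sketch; duplicate
+// keys are resolved with last-value-wins semantics before storage):
+//
+//	set := NewSet(Int("a", 1), String("b", "two"), Int("a", 3))
+//	v, ok := set.Value("a") // ok == true, v.AsInt64() == 3
+//	set.HasValue("c")       // false
+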
+// NewSet returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// Except for empty sets, this method adds an additional allocation compared
+// with calls that include a Sortable.
+func NewSet(kvs ...KeyValue) Set {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty()
+	}
+	srt := sortables.Get().(*Sortable)
+	s, _ := NewSetWithSortableFiltered(kvs, srt, nil)
+	sortables.Put(srt)
+	return s
+}
+
+// NewSetWithSortable returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Sortable option as a memory optimization.
+func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty()
+	}
+	s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
+	return s
+}
+
+// NewSetWithFiltered returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Filter to include/exclude attribute keys from the
+// return value. Excluded keys are returned as a slice of attribute values.
+func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+	srt := sortables.Get().(*Sortable)
+	s, filtered := NewSetWithSortableFiltered(kvs, srt, filter)
+	sortables.Put(srt)
+	return s, filtered
+}
+
+// NewSetWithSortableFiltered returns a new Set.
+//
+// Duplicate keys are eliminated by taking the last value. This
+// re-orders the input slice so that unique last-values are contiguous
+// at the end of the slice.
+//
+// This ensures the following:
+//
+//   - Last-value-wins semantics
+//   - Caller sees the reordering, but doesn't lose values
+//   - Repeated calls preserve last-value-wins semantics
+//
+// Note that methods are defined on Set, although this returns Set. Callers
+// can avoid memory allocations by:
+//
+//   - allocating a Sortable for use as a temporary in this method
+//   - allocating a Set for storing the return value of this constructor.
+//
+// The result maintains a cache of encoded attributes, by attribute.EncoderID.
+// This value should not be copied after its first use.
+//
+// The second []KeyValue return value is a list of attributes that were
+// excluded by the Filter (if non-nil).
+func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+
+	*tmp = kvs
+
+	// Stable sort so the following de-duplication can implement
+	// last-value-wins semantics.
+	sort.Stable(tmp)
+
+	*tmp = nil
+
+	position := len(kvs) - 1
+	offset := position - 1
+
+	// The requirements stated above require that the stable
+	// result be placed in the end of the input slice, while
+	// overwritten values are swapped to the beginning.
+	//
+	// De-duplicate with last-value-wins semantics. Preserve
+	// duplicate values at the beginning of the input slice.
+	for ; offset >= 0; offset-- {
+		if kvs[offset].Key == kvs[position].Key {
+			continue
+		}
+		position--
+		kvs[offset], kvs[position] = kvs[position], kvs[offset]
+	}
+	kvs = kvs[position:]
+
+	if filter != nil {
+		if div := filteredToFront(kvs, filter); div != 0 {
+			return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
+		}
+	}
+	return Set{equivalent: computeDistinct(kvs)}, nil
+}
+
+// filteredToFront filters slice in-place using keep function. All KeyValues that need to
+// be removed are moved to the front.
All KeyValues that need to be kept are +// moved (in-order) to the back. The index for the first KeyValue to be kept is +// returned. +func filteredToFront(slice []KeyValue, keep Filter) int { + n := len(slice) + j := n + for i := n - 1; i >= 0; i-- { + if keep(slice[i]) { + j-- + slice[i], slice[j] = slice[j], slice[i] + } + } + return j +} + +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. +func (l *Set) Filter(re Filter) (Set, []KeyValue) { + if re == nil { + return *l, nil + } + + // Iterate in reverse to the first attribute that will be filtered out. + n := l.Len() + first := n - 1 + for ; first >= 0; first-- { + kv, _ := l.Get(first) + if !re(kv) { + break + } + } + + // No attributes will be dropped, return the immutable Set l and nil. + if first < 0 { + return *l, nil + } + + // Copy now that we know we need to return a modified set. + // + // Do not do this in-place on the underlying storage of *Set l. Sets are + // immutable and filtering should not change this. + slice := l.ToSlice() + + // Don't re-iterate the slice if only slice[0] is filtered. + if first == 0 { + // It is safe to assume len(slice) >= 1 given we found at least one + // attribute above that needs to be filtered out. + return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + } + + // Move the filtered slice[first] to the front (preserving order). + kv := slice[first] + copy(slice[1:first+1], slice[:first]) + slice[0] = kv + + // Do not re-evaluate re(slice[first+1:]). + div := filteredToFront(slice[1:first+1], re) + 1 + return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] +} + +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. +func computeDistinct(kvs []KeyValue) Distinct { + iface := computeDistinctFixed(kvs) + if iface == nil { + iface = computeDistinctReflect(kvs) + } + return Distinct{ + iface: iface, + } +} + +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. +func computeDistinctFixed(kvs []KeyValue) interface{} { + switch len(kvs) { + case 1: + ptr := new([1]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 2: + ptr := new([2]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 3: + ptr := new([3]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 4: + ptr := new([4]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 5: + ptr := new([5]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 6: + ptr := new([6]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 7: + ptr := new([7]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 8: + ptr := new([8]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 9: + ptr := new([9]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 10: + ptr := new([10]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + default: + return nil + } +} + +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. +func computeDistinctReflect(kvs []KeyValue) interface{} { + at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() + for i, keyValue := range kvs { + *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue + } + return at.Interface() +} + +// MarshalJSON returns the JSON encoding of the Set. 
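+//
+// For example (illustrative; the exact output depends on Value's own JSON
+// encoding):
+//
+//	set := NewSet(Int("a", 1))
+//	b, _ := json.Marshal(&set) // use &set: MarshalJSON has a pointer receiver
+//	_ = b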
+func (l *Set) MarshalJSON() ([]byte, error) { + return json.Marshal(l.equivalent.iface) +} + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. +func (l Set) MarshalLog() interface{} { + kvs := make(map[string]string) + for _, kv := range l.ToSlice() { + kvs[string(kv.Key)] = kv.Value.Emit() + } + return kvs +} + +// Len implements sort.Interface. +func (l *Sortable) Len() int { + return len(*l) +} + +// Swap implements sort.Interface. +func (l *Sortable) Swap(i, j int) { + (*l)[i], (*l)[j] = (*l)[j], (*l)[i] +} + +// Less implements sort.Interface. +func (l *Sortable) Less(i, j int) bool { + return (*l)[i].Key < (*l)[j].Key +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go new file mode 100644 index 000000000..e584b2477 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. + +package attribute + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[INVALID-0] + _ = x[BOOL-1] + _ = x[INT64-2] + _ = x[FLOAT64-3] + _ = x[STRING-4] + _ = x[BOOLSLICE-5] + _ = x[INT64SLICE-6] + _ = x[FLOAT64SLICE-7] + _ = x[STRINGSLICE-8] +} + +const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" + +var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go new file mode 100644 index 000000000..cb21dd5c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -0,0 +1,270 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" +) + +//go:generate stringer -type=Type + +// Type describes the type of the data Value holds. +type Type int // nolint: revive // redefines builtin Type. + +// Value represents the value part in key-value pairs. +type Value struct { + vtype Type + numeric uint64 + stringly string + slice interface{} +} + +const ( + // INVALID is used for a Value with no value set. + INVALID Type = iota + // BOOL is a boolean Type Value. + BOOL + // INT64 is a 64-bit signed integral Type Value. + INT64 + // FLOAT64 is a 64-bit floating point Type Value. + FLOAT64 + // STRING is a string Type Value. + STRING + // BOOLSLICE is a slice of booleans Type Value. 
+	BOOLSLICE
+	// INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
+	INT64SLICE
+	// FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
+	FLOAT64SLICE
+	// STRINGSLICE is a slice of strings Type Value.
+	STRINGSLICE
+)
+
+// BoolValue creates a BOOL Value.
+func BoolValue(v bool) Value {
+	return Value{
+		vtype:   BOOL,
+		numeric: internal.BoolToRaw(v),
+	}
+}
+
+// BoolSliceValue creates a BOOLSLICE Value.
+func BoolSliceValue(v []bool) Value {
+	return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
+}
+
+// IntValue creates an INT64 Value.
+func IntValue(v int) Value {
+	return Int64Value(int64(v))
+}
+
+// IntSliceValue creates an INT64SLICE Value.
+func IntSliceValue(v []int) Value {
+	var int64Val int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
+	for i, val := range v {
+		cp.Elem().Index(i).SetInt(int64(val))
+	}
+	return Value{
+		vtype: INT64SLICE,
+		slice: cp.Elem().Interface(),
+	}
+}
+
+// Int64Value creates an INT64 Value.
+func Int64Value(v int64) Value {
+	return Value{
+		vtype:   INT64,
+		numeric: internal.Int64ToRaw(v),
+	}
+}
+
+// Int64SliceValue creates an INT64SLICE Value.
+func Int64SliceValue(v []int64) Value {
+	return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
+}
+
+// Float64Value creates a FLOAT64 Value.
+func Float64Value(v float64) Value {
+	return Value{
+		vtype:   FLOAT64,
+		numeric: internal.Float64ToRaw(v),
+	}
+}
+
+// Float64SliceValue creates a FLOAT64SLICE Value.
+func Float64SliceValue(v []float64) Value {
+	return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
+}
+
+// StringValue creates a STRING Value.
+func StringValue(v string) Value {
+	return Value{
+		vtype:    STRING,
+		stringly: v,
+	}
+}
+
+// StringSliceValue creates a STRINGSLICE Value.
+func StringSliceValue(v []string) Value {
+	return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
+}
+
+// Type returns the type of the Value.
+func (v Value) Type() Type {
+	return v.vtype
+}
+
+// AsBool returns the bool value. Make sure that the Value's type is
+// BOOL.
+func (v Value) AsBool() bool {
+	return internal.RawToBool(v.numeric)
+}
+
+// AsBoolSlice returns the []bool value. Make sure that the Value's type is
+// BOOLSLICE.
+func (v Value) AsBoolSlice() []bool {
+	if v.vtype != BOOLSLICE {
+		return nil
+	}
+	return v.asBoolSlice()
+}
+
+func (v Value) asBoolSlice() []bool {
+	return attribute.AsBoolSlice(v.slice)
+}
+
+// AsInt64 returns the int64 value. Make sure that the Value's type is
+// INT64.
+func (v Value) AsInt64() int64 {
+	return internal.RawToInt64(v.numeric)
+}
+
+// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
+// INT64SLICE.
+func (v Value) AsInt64Slice() []int64 {
+	if v.vtype != INT64SLICE {
+		return nil
+	}
+	return v.asInt64Slice()
+}
+
+func (v Value) asInt64Slice() []int64 {
+	return attribute.AsInt64Slice(v.slice)
+}
+
+// AsFloat64 returns the float64 value. Make sure that the Value's
+// type is FLOAT64.
+func (v Value) AsFloat64() float64 {
+	return internal.RawToFloat64(v.numeric)
+}
+
+// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
+// FLOAT64SLICE.
+func (v Value) AsFloat64Slice() []float64 {
+	if v.vtype != FLOAT64SLICE {
+		return nil
+	}
+	return v.asFloat64Slice()
+}
+
+func (v Value) asFloat64Slice() []float64 {
+	return attribute.AsFloat64Slice(v.slice)
+}
+
+// AsString returns the string value. Make sure that the Value's type
+// is STRING.
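+//
+// For example (editor's illustrative sketch):
+//
+//	v := attribute.StringValue("podman")
+//	_ = v.AsString() // "podman"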
+func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. Make sure that the Value's type is +// STRINGSLICE. +func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. +func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: + return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: + return fmt.Sprint(v.asInt64Slice()) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: + return fmt.Sprint(v.asFloat64Slice()) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: + return fmt.Sprint(v.asStringSlice()) + case STRING: + return v.stringly + default: + return "unknown" + } +} + +// MarshalJSON returns the JSON encoding of the Value. +func (v Value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Type().String() + jsonVal.Value = v.AsInterface() + return json.Marshal(jsonVal) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go new file mode 100644 index 000000000..7d27cf77d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -0,0 +1,744 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "errors" + "fmt" + "net/url" + "strings" + + "go.opentelemetry.io/otel/internal/baggage" +) + +const ( + maxMembers = 180 + maxBytesPerMembers = 4096 + maxBytesPerBaggageString = 8192 + + listDelimiter = "," + keyValueDelimiter = "=" + propertyDelimiter = ";" +) + +var ( + errInvalidKey = errors.New("invalid key") + errInvalidValue = errors.New("invalid value") + errInvalidProperty = errors.New("invalid baggage list-member property") + errInvalidMember = errors.New("invalid baggage list-member") + errMemberNumber = errors.New("too many list-members in baggage-string") + errMemberBytes = errors.New("list-member too large") + errBaggageBytes = errors.New("baggage-string too large") +) + +// Property is an additional metadata entry for a baggage list-member. 
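+// For instance (editor's illustrative example), in the header list-member
+// "k=v;p1;p2=val", the strings "p1" and "p2=val" are properties of the
+// "k=v" member.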
+type Property struct {
+	key, value string
+
+	// hasValue indicates if a zero-value value means the property does not
+	// have a value or if it was the zero-value.
+	hasValue bool
+}
+
+// NewKeyProperty returns a new Property for key.
+//
+// If key is invalid, an error will be returned.
+func NewKeyProperty(key string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{key: key}
+	return p, nil
+}
+
+// NewKeyValueProperty returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewKeyValuePropertyRaw] instead,
+// which does not require percent-encoding of the value.
+func NewKeyValueProperty(key, value string) (Property, error) {
+	if !validateValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	decodedValue, err := url.PathUnescape(value)
+	if err != nil {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{
+		key:      key,
+		value:    value,
+		hasValue: true,
+	}
+	return p, nil
+}
+
+func newInvalidProperty() Property {
+	return Property{}
+}
+
+// parseProperty attempts to decode a Property from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseProperty(property string) (Property, error) {
+	if property == "" {
+		return newInvalidProperty(), nil
+	}
+
+	p, ok := parsePropertyInternal(property)
+	if !ok {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
+	}
+
+	return p, nil
+}
+
+// validate ensures p conforms to the W3C Baggage specification, returning an
+// error otherwise.
+func (p Property) validate() error {
+	errFunc := func(err error) error {
+		return fmt.Errorf("invalid property: %w", err)
+	}
+
+	if !validateKey(p.key) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+	}
+	if !p.hasValue && p.value != "" {
+		return errFunc(errors.New("inconsistent value"))
+	}
+	return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+	return p.key
+}
+
+// Value returns the Property value. Additionally, a boolean is returned
+// indicating whether the Property has a value set: it is false when no value
+// is set, and true otherwise, even if the set value is empty.
+func (p Property) Value() (string, bool) {
+	return p.value, p.hasValue
+}
+
+// String encodes Property into a header string compliant with the W3C Baggage
+// specification.
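+//
+// For example (editor's sketch): a Property with key "p" and no value renders
+// as "p", while key "p" with value "a b" renders as "p=a%20b" because the
+// space is percent-encoded by valueEscape.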
+func (p Property) String() string {
+	if p.hasValue {
+		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
+	}
+	return p.key
+}
+
+type properties []Property
+
+func fromInternalProperties(iProps []baggage.Property) properties {
+	if len(iProps) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(iProps))
+	for i, p := range iProps {
+		props[i] = Property{
+			key:      p.Key,
+			value:    p.Value,
+			hasValue: p.HasValue,
+		}
+	}
+	return props
+}
+
+func (p properties) asInternal() []baggage.Property {
+	if len(p) == 0 {
+		return nil
+	}
+
+	iProps := make([]baggage.Property, len(p))
+	for i, prop := range p {
+		iProps[i] = baggage.Property{
+			Key:      prop.key,
+			Value:    prop.value,
+			HasValue: prop.hasValue,
+		}
+	}
+	return iProps
+}
+
+func (p properties) Copy() properties {
+	if len(p) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(p))
+	copy(props, p)
+	return props
+}
+
+// validate ensures each Property in p conforms to the W3C Baggage
+// specification, returning an error otherwise.
+func (p properties) validate() error {
+	for _, prop := range p {
+		if err := prop.validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// String encodes properties into a header string compliant with the W3C Baggage
+// specification.
+func (p properties) String() string {
+	props := make([]string, len(p))
+	for i, prop := range p {
+		props[i] = prop.String()
+	}
+	return strings.Join(props, propertyDelimiter)
+}
+
+// Member is a list-member of a baggage-string as defined by the W3C Baggage
+// specification.
+type Member struct {
+	key, value string
+	properties properties
+
+	// hasData indicates whether the created member contains data or not.
+	// Members that do not contain data are invalid with no other check
+	// required.
+	hasData bool
+}
+
+// NewMember returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewMemberRaw] instead,
+// which does not require percent-encoding of the value.
+func NewMember(key, value string, props ...Property) (Member, error) {
+	if !validateValue(value) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	decodedValue, err := url.PathUnescape(value)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	return NewMemberRaw(key, decodedValue, props...)
+}
+
+// NewMemberRaw returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewMemberRaw(key, value string, props ...Property) (Member, error) {
+	m := Member{
+		key:        key,
+		value:      value,
+		properties: properties(props).Copy(),
+		hasData:    true,
+	}
+	if err := m.validate(); err != nil {
+		return newInvalidMember(), err
+	}
+	return m, nil
+}
+
+func newInvalidMember() Member {
+	return Member{}
+}
+
+// parseMember attempts to decode a Member from the passed string. It returns
+// an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseMember(member string) (Member, error) {
+	if n := len(member); n > maxBytesPerMembers {
+		return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
+	}
+
+	var props properties
+	keyValue, properties, found := strings.Cut(member, propertyDelimiter)
+	if found {
+		// Parse the member properties.
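+		// (Editor's illustrative note: for a member such as "k=v;p1;p2=w",
+		// keyValue is "k=v" and properties is "p1;p2=w" at this point.)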
+		for _, pStr := range strings.Split(properties, propertyDelimiter) {
+			p, err := parseProperty(pStr)
+			if err != nil {
+				return newInvalidMember(), err
+			}
+			props = append(props, p)
+		}
+	}
+	// Parse the member key/value pair.
+
+	// Take into account a value can contain equal signs (=).
+	k, v, found := strings.Cut(keyValue, keyValueDelimiter)
+	if !found {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
+	}
+	// "Leading and trailing whitespaces are allowed but MUST be trimmed
+	// when converting the header into a data structure."
+	key := strings.TrimSpace(k)
+	if !validateKey(key) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	val := strings.TrimSpace(v)
+	if !validateValue(val) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
+	}
+
+	// Decode a percent-encoded value.
+	value, err := url.PathUnescape(val)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
+	}
+	return Member{key: key, value: value, properties: props, hasData: true}, nil
+}
+
+// validate ensures m conforms to the W3C Baggage specification; a key must be
+// an ASCII string. It returns an error otherwise.
+func (m Member) validate() error {
+	if !m.hasData {
+		return fmt.Errorf("%w: %q", errInvalidMember, m)
+	}
+
+	if !validateKey(m.key) {
+		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
+	}
+	return m.properties.validate()
+}
+
+// Key returns the Member key.
+func (m Member) Key() string { return m.key }
+
+// Value returns the Member value.
+func (m Member) Value() string { return m.value }
+
+// Properties returns a copy of the Member properties.
+func (m Member) Properties() []Property { return m.properties.Copy() }
+
+// String encodes Member into a header string compliant with the W3C Baggage
+// specification.
+func (m Member) String() string {
+	// A key is just an ASCII string. A value is restricted to be
+	// US-ASCII characters excluding CTLs, whitespace,
+	// DQUOTE, comma, semicolon, and backslash.
+	s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
+	if len(m.properties) > 0 {
+		s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
+	}
+	return s
+}
+
+// Baggage is a list of baggage members representing the baggage-string as
+// defined by the W3C Baggage specification.
+type Baggage struct { //nolint:golint
+	list baggage.List
+}
+
+// New returns a new valid Baggage. It returns an error if it results in a
+// Baggage exceeding limits set in the W3C Baggage specification.
+//
+// It expects all the provided members to have already been validated.
+func New(members ...Member) (Baggage, error) {
+	if len(members) == 0 {
+		return Baggage{}, nil
+	}
+
+	b := make(baggage.List)
+	for _, m := range members {
+		if !m.hasData {
+			return Baggage{}, errInvalidMember
+		}
+
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// Check member numbers after deduplication.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	bag := Baggage{b}
+	if n := len(bag.String()); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	return bag, nil
+}
+
+// Parse attempts to decode a baggage-string from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+//
+// If there are duplicate list-members contained in baggage, the last one
+// defined (reading left-to-right) will be the only one kept. This diverges
+// from the W3C Baggage specification which allows duplicate list-members, but
+// conforms to the OpenTelemetry Baggage specification.
+func Parse(bStr string) (Baggage, error) {
+	if bStr == "" {
+		return Baggage{}, nil
+	}
+
+	if n := len(bStr); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	b := make(baggage.List)
+	for _, memberStr := range strings.Split(bStr, listDelimiter) {
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return Baggage{}, err
+		}
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// OpenTelemetry does not allow for duplicate list-members, but the W3C
+	// specification does. Now that we have deduplicated, ensure the baggage
+	// does not exceed list-member limits.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	return Baggage{b}, nil
+}
+
+// Member returns the baggage list-member identified by key.
+//
+// If there is no list-member matching the passed key the returned Member will
+// be a zero-value Member.
+// The returned member is not validated, as we assume the validation happened
+// when it was added to the Baggage.
+func (b Baggage) Member(key string) Member {
+	v, ok := b.list[key]
+	if !ok {
+		// We do not need to worry about distinguishing between the situation
+		// where a zero-valued Member is included in the Baggage because a
+		// zero-valued Member is invalid according to the W3C Baggage
+		// specification (it has an empty key).
+		return newInvalidMember()
+	}
+
+	return Member{
+		key:        key,
+		value:      v.Value,
+		properties: fromInternalProperties(v.Properties),
+		hasData:    true,
+	}
+}
+
+// Members returns all the baggage list-members.
+// The order of the returned list-members does not have significance.
+//
+// The returned members are not validated, as we assume the validation happened
+// when they were added to the Baggage.
+func (b Baggage) Members() []Member {
+	if len(b.list) == 0 {
+		return nil
+	}
+
+	members := make([]Member, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+			hasData:    true,
+		})
+	}
+	return members
+}
+
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key the existing Member is
+// replaced.
+//
+// If member is invalid according to the W3C Baggage specification, an error
+// is returned with the original Baggage.
+func (b Baggage) SetMember(member Member) (Baggage, error) {
+	if !member.hasData {
+		return b, errInvalidMember
+	}
+
+	n := len(b.list)
+	if _, ok := b.list[member.key]; !ok {
+		n++
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		// Do not copy if we are just going to overwrite.
+		if k == member.key {
+			continue
+		}
+		list[k] = v
+	}
+
+	list[member.key] = baggage.Item{
+		Value:      member.value,
+		Properties: member.properties.asInternal(),
+	}
+
+	return Baggage{list: list}, nil
+}
+
+// DeleteMember returns a copy of the Baggage with the list-member identified
+// by key removed.
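+//
+// For example (editor's sketch): deleting "k1" from baggage parsed from
+// "k1=v1,k2=v2" yields a Baggage that encodes to "k2=v2".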
+func (b Baggage) DeleteMember(key string) Baggage {
+	n := len(b.list)
+	if _, ok := b.list[key]; ok {
+		n--
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		if k == key {
+			continue
+		}
+		list[k] = v
+	}
+
+	return Baggage{list: list}
+}
+
+// Len returns the number of list-members in the Baggage.
+func (b Baggage) Len() int {
+	return len(b.list)
+}
+
+// String encodes Baggage into a header string compliant with the W3C Baggage
+// specification.
+func (b Baggage) String() string {
+	members := make([]string, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+		}.String())
+	}
+	return strings.Join(members, listDelimiter)
+}
+
+// parsePropertyInternal attempts to decode a Property from the passed string.
+// It follows the spec at https://www.w3.org/TR/baggage/#definition.
+func parsePropertyInternal(s string) (p Property, ok bool) {
+	// For the entire function we will use " key = value " as an example.
+	// Attempting to parse the key.
+	// First skip spaces at the beginning "< >key = value " (they could be empty).
+	index := skipSpace(s, 0)
+
+	// Parse the key: " <key>= value ".
+	keyStart := index
+	keyEnd := index
+	for _, c := range s[keyStart:] {
+		if !validateKeyChar(c) {
+			break
+		}
+		keyEnd++
+	}
+
+	// If we couldn't find any valid key character,
+	// it means the key is either empty or invalid.
+	if keyStart == keyEnd {
+		return
+	}
+
+	// Skip spaces after the key: " key< >= value ".
+	index = skipSpace(s, keyEnd)
+
+	if index == len(s) {
+		// A key can have no value, like: " key ".
+		ok = true
+		p.key = s[keyStart:keyEnd]
+		return
+	}
+
+	// If we have not reached the end and we can't find the '=' delimiter,
+	// it means the property is invalid.
+	if s[index] != keyValueDelimiter[0] {
+		return
+	}
+
+	// Attempting to parse the value.
+	// Match: " key =< >value ".
+	index = skipSpace(s, index+1)
+
+	// Match the value string: " key = <value> ".
+	// A valid property can be: " key =".
+	// Therefore, we don't have to check if the value is empty.
+	valueStart := index
+	valueEnd := index
+	for _, c := range s[valueStart:] {
+		if !validateValueChar(c) {
+			break
+		}
+		valueEnd++
+	}
+
+	// Skip all trailing whitespaces: " key = value< >".
+	index = skipSpace(s, valueEnd)
+
+	// If after looking for the value and skipping whitespaces
+	// we have not reached the end, it means the property is
+	// invalid, something like: " key = value value1".
+	if index != len(s) {
+		return
+	}
+
+	// Decode a percent-encoded value.
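+	// (Editor's illustrative note: e.g. "a%20b" decodes to "a b".)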
+ value, err := url.PathUnescape(s[valueStart:valueEnd]) + if err != nil { + return + } + + ok = true + p.key = s[keyStart:keyEnd] + p.hasValue = true + + p.value = value + return +} + +func skipSpace(s string, offset int) int { + i := offset + for ; i < len(s); i++ { + c := s[i] + if c != ' ' && c != '\t' { + break + } + } + return i +} + +func validateKey(s string) bool { + if len(s) == 0 { + return false + } + + for _, c := range s { + if !validateKeyChar(c) { + return false + } + } + + return true +} + +func validateKeyChar(c int32) bool { + return (c >= 0x23 && c <= 0x27) || + (c >= 0x30 && c <= 0x39) || + (c >= 0x41 && c <= 0x5a) || + (c >= 0x5e && c <= 0x7a) || + c == 0x21 || + c == 0x2a || + c == 0x2b || + c == 0x2d || + c == 0x2e || + c == 0x7c || + c == 0x7e +} + +func validateValue(s string) bool { + for _, c := range s { + if !validateValueChar(c) { + return false + } + } + + return true +} + +func validateValueChar(c int32) bool { + return c == 0x21 || + (c >= 0x23 && c <= 0x2b) || + (c >= 0x2d && c <= 0x3a) || + (c >= 0x3c && c <= 0x5b) || + (c >= 0x5d && c <= 0x7e) +} + +// valueEscape escapes the string so it can be safely placed inside a baggage value, +// replacing special characters with %XX sequences as needed. +// +// The implementation is based on: +// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285. +func valueEscape(s string) string { + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + var buf [64]byte + var t []byte + + required := len(s) + 2*hexCount + if required <= len(buf) { + t = buf[:required] + } else { + t = make([]byte, required) + } + + j := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(s[i]) { + const upperhex = "0123456789ABCDEF" + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + + return string(t) +} + +// shouldEscape returns true if the specified byte should be escaped when +// appearing in a baggage value string. +func shouldEscape(c byte) bool { + if c == '%' { + // The percent character must be encoded so that percent-encoding can work. + return true + } + return !validateValueChar(int32(c)) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go new file mode 100644 index 000000000..24b34b756 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. 
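+	// (Editor's note: ContextWithList also invokes any set hook registered
+	// through the internal baggage package, as shown in context.go below.)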
+	return baggage.ContextWithList(parent, b.list)
+}
+
+// ContextWithoutBaggage returns a copy of parent with no baggage.
+func ContextWithoutBaggage(parent context.Context) context.Context {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return baggage.ContextWithList(parent, nil)
+}
+
+// FromContext returns the baggage contained in ctx.
+func FromContext(ctx context.Context) Baggage {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return Baggage{list: baggage.ListFromContext(ctx)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
new file mode 100644
index 000000000..4545100df
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package baggage provides functionality for storing and retrieving
+baggage items in Go context. For propagating the baggage, see the
+go.opentelemetry.io/otel/propagation package.
+*/
+package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
new file mode 100644
index 000000000..587ebae4e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package codes // import "go.opentelemetry.io/otel/codes"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+const (
+	// Unset is the default status code.
+	Unset Code = 0
+
+	// Error indicates the operation contains an error.
+	//
+	// NOTE: The error code in OTLP is 2.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Error Code = 1
+
+	// Ok indicates the operation has been validated by an Application
+	// developer or Operator to have completed successfully, or to contain
+	// no error.
+	//
+	// NOTE: The Ok code in OTLP is 1.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Ok Code = 2
+
+	maxCode = 3
+)
+
+// Code is a 32-bit representation of a status state.
+type Code uint32
+
+var codeToStr = map[Code]string{
+	Unset: "Unset",
+	Error: "Error",
+	Ok:    "Ok",
+}
+
+var strToCode = map[string]Code{
+	`"Unset"`: Unset,
+	`"Error"`: Error,
+	`"Ok"`:    Ok,
+}
+
+// String returns the Code as a string.
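+//
+// For example (editor's sketch): codes.Ok.String() returns "Ok"; a Code
+// missing from codeToStr yields the empty string.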
+func (c Code) String() string { + return codeToStr[c] +} + +// UnmarshalJSON unmarshals b into the Code. +// +// This is based on the functionality in the gRPC codes package: +// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + var x interface{} + if err := json.Unmarshal(b, &x); err != nil { + return err + } + switch x.(type) { + case string: + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + case float64: + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + default: + return fmt.Errorf("invalid code: %q", string(b)) + } +} + +// MarshalJSON returns c as the JSON encoding of c. +func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 000000000..4e328fbb4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package codes defines the canonical error codes used by OpenTelemetry. + +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 000000000..36d7c24e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otel provides global access to the OpenTelemetry API. 
The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. +*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 000000000..72fad8541 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. +func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh new file mode 100644 index 000000000..9a58fb1d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +top_dir='.' 
+if [[ $# -gt 0 ]]; then + top_dir="${1}" +fi + +p=$(pwd) +mod_dirs=() + +# Note `mapfile` does not exist in older bash versions: +# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash + +while IFS= read -r line; do + mod_dirs+=("$line") +done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) + +for mod_dir in "${mod_dirs[@]}"; do + cd "${mod_dir}" + + while IFS= read -r line; do + echo ".${line#${p}}" + done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') + cd "${p}" +done diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go new file mode 100644 index 000000000..4115fe3bb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" +) + +var ( + // Compile-time check global.ErrDelegator implements ErrorHandler. + _ ErrorHandler = (*global.ErrDelegator)(nil) + // Compile-time check global.ErrLogger implements ErrorHandler. + _ ErrorHandler = (*global.ErrLogger)(nil) +) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } + +// Handle is a convenience function for ErrorHandler().Handle(err). +func Handle(err error) { global.Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 000000000..622c3ee3f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package attribute provides several helper functions for some commonly used
+logic of processing attributes.
+*/
+package attribute // import "go.opentelemetry.io/otel/internal/attribute"
+
+import (
+	"reflect"
+)
+
+// BoolSliceValue converts a bool slice into an array with the same elements as the slice.
+func BoolSliceValue(v []bool) interface{} {
+	var zero bool
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
+	return cp.Elem().Interface()
+}
+
+// Int64SliceValue converts an int64 slice into an array with the same elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+	var zero int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
+	return cp.Elem().Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+	var zero float64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
+	return cp.Elem().Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same elements as the slice.
+func StringSliceValue(v []string) interface{} {
+	var zero string
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
+	return cp.Elem().Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as the array.
+func AsBoolSlice(v interface{}) []bool {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero bool
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
+func AsInt64Slice(v interface{}) []int64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero int64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero float64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+}
+
+// AsStringSlice converts a string array into a slice with the same elements as the array.
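+//
+// For example (editor's sketch): given the array [2]string{"a", "b"} as an
+// interface{} value, AsStringSlice returns []string{"a", "b"}.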
+func AsStringSlice(v interface{}) []string { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero string + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go new file mode 100644 index 000000000..b96e5408e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item + +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property +} + +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string + + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + HasValue bool +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 000000000..4469700d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. 
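+// (Editor's note: the OpenTracing bridge uses such hooks to keep baggage
+// state synchronized, as described in this package's doc comment above.)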
+type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. +// +// Passing nil GetHookFunc creates a context with no get hook to call. +func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. +func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 000000000..f532f07e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package internal // import "go.opentelemetry.io/otel/internal"
+
+//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
+//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
+//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
+
+//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
+//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
+//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
+//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
+//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
new file mode 100644
index 000000000..5e9b83047
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
@@ -0,0 +1,102 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+)
+
+var (
+	// GlobalErrorHandler provides an ErrorHandler that can be used
+	// throughout an OpenTelemetry instrumented project. When a
+	// user-specified ErrorHandler is registered (`SetErrorHandler`), all
+	// calls to `Handle` will be delegated to the registered ErrorHandler.
+	GlobalErrorHandler = defaultErrorHandler()
+
+	// Compile-time check that delegator implements ErrorHandler.
+	_ ErrorHandler = (*ErrDelegator)(nil)
+	// Compile-time check that errLogger implements ErrorHandler.
+	_ ErrorHandler = (*ErrLogger)(nil)
+)
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+	// Handle handles any error deemed irremediable by an OpenTelemetry
+	// component.
+ Handle(error) +} + +type ErrDelegator struct { + delegate atomic.Pointer[ErrorHandler] +} + +func (d *ErrDelegator) Handle(err error) { + d.getDelegate().Handle(err) +} + +func (d *ErrDelegator) getDelegate() ErrorHandler { + return *d.delegate.Load() +} + +// setDelegate sets the ErrorHandler delegate. +func (d *ErrDelegator) setDelegate(eh ErrorHandler) { + d.delegate.Store(&eh) +} + +func defaultErrorHandler() *ErrDelegator { + d := &ErrDelegator{} + d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) + return d +} + +// ErrLogger logs errors if no delegate is set, otherwise they are delegated. +type ErrLogger struct { + l *log.Logger +} + +// Handle logs err if no delegate is set, otherwise it is delegated. +func (h *ErrLogger) Handle(err error) { + h.l.Print(err) +} + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return GlobalErrorHandler +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + GlobalErrorHandler.setDelegate(h) +} + +// Handle is a convenience function for ErrorHandler().Handle(err). +func Handle(err error) { + GetErrorHandler().Handle(err) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go new file mode 100644 index 000000000..ebb13c206 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -0,0 +1,371 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() metric.Observable +} + +type afCounter struct { + embedded.Float64ObservableCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableCounterOption + + delegate atomic.Value // metric.Float64ObservableCounter +} + +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) 
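+	// (Editor's note: this setDelegate pattern repeats for every instrument
+	// kind below; until it succeeds, the delegate stays unset and the
+	// instrument's operations are no-ops.)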
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + embedded.Float64ObservableUpDownCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Float64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + embedded.Float64ObservableGauge + metric.Float64Observable + + name string + opts []metric.Float64ObservableGaugeOption + + delegate atomic.Value // metric.Float64ObservableGauge +} + +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + embedded.Int64ObservableCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableCounterOption + + delegate atomic.Value // metric.Int64ObservableCounter +} + +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + embedded.Int64ObservableUpDownCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Int64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + embedded.Int64ObservableGauge + metric.Int64Observable + + name string + opts []metric.Int64ObservableGaugeOption + + delegate atomic.Value // metric.Int64ObservableGauge +} + +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) 
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + embedded.Float64Counter + + name string + opts []metric.Float64CounterOption + + delegate atomic.Value // metric.Float64Counter +} + +var _ metric.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Counter).Add(ctx, incr, opts...) + } +} + +type sfUpDownCounter struct { + embedded.Float64UpDownCounter + + name string + opts []metric.Float64UpDownCounterOption + + delegate atomic.Value // metric.Float64UpDownCounter +} + +var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) + } +} + +type sfHistogram struct { + embedded.Float64Histogram + + name string + opts []metric.Float64HistogramOption + + delegate atomic.Value // metric.Float64Histogram +} + +var _ metric.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Histogram).Record(ctx, x, opts...) + } +} + +type siCounter struct { + embedded.Int64Counter + + name string + opts []metric.Int64CounterOption + + delegate atomic.Value // metric.Int64Counter +} + +var _ metric.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Counter).Add(ctx, x, opts...) + } +} + +type siUpDownCounter struct { + embedded.Int64UpDownCounter + + name string + opts []metric.Int64UpDownCounterOption + + delegate atomic.Value // metric.Int64UpDownCounter +} + +var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) 
+	}
+}
+
+type siHistogram struct {
+	embedded.Int64Histogram
+
+	name string
+	opts []metric.Int64HistogramOption
+
+	delegate atomic.Value // metric.Int64Histogram
+}
+
+var _ metric.Int64Histogram = (*siHistogram)(nil)
+
+func (i *siHistogram) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64Histogram(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
new file mode 100644
index 000000000..c6f305a2b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/stdr"
+)
+
+// globalLogger is the logging interface used within the otel api and sdk to
+// provide details of the internals.
+//
+// The default logger uses stdr which is backed by the standard `log.Logger`
+// interface. This logger will only show messages at the Error Level.
+var globalLogger atomic.Pointer[logr.Logger]
+
+func init() {
+	SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+}
+
+// SetLogger overrides the globalLogger with l.
+//
+// To see Warn messages use a logger with `l.V(1).Enabled() == true`.
+// To see Info messages use a logger with `l.V(4).Enabled() == true`.
+// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
+func SetLogger(l logr.Logger) {
+	globalLogger.Store(&l)
+}
+
+func getLogger() logr.Logger {
+	return *globalLogger.Load()
+}
+
+// Info prints messages about the general state of the API or SDK.
+// This should usually be less than 5 messages a minute.
+func Info(msg string, keysAndValues ...interface{}) {
+	getLogger().V(4).Info(msg, keysAndValues...)
+}
+
+// Error prints messages about exceptional states of the API or SDK.
+func Error(err error, msg string, keysAndValues ...interface{}) {
+	getLogger().Error(err, msg, keysAndValues...)
+}
+
+// Debug prints messages about all internal changes in the API or SDK.
+func Debug(msg string, keysAndValues ...interface{}) {
+	getLogger().V(8).Info(msg, keysAndValues...)
+}
+
+// Warn prints messages about warnings in the API or SDK.
+// Not an error but is likely more important than an informational event.
+func Warn(msg string, keysAndValues ...interface{}) {
+	getLogger().V(1).Info(msg, keysAndValues...)
+} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go new file mode 100644 index 000000000..0097db478 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -0,0 +1,354 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "container/list" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. +// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + embedded.MeterProvider + + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + embedded.Meter + + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments []delegatedInstrument + + registry list.List + + delegate atomic.Value // metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + meter := provider.Meter(m.name, m.opts...) 
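+	// Store the delegate first so new instrument requests delegate
+	// immediately; then migrate the instruments and callbacks that were
+	// created while no SDK was configured.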
+ m.delegate.Store(meter) + + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + for e := m.registry.Front(); e != nil; e = e.Next() { + r := e.Value.(*registration) + r.setDelegate(meter) + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64UpDownCounter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. +func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + insts = unwrapInstruments(insts) + return del.RegisterCallback(f, insts...) + } + + m.mtx.Lock() + defer m.mtx.Unlock() + + reg := ®istration{instruments: insts, function: f} + e := m.registry.PushBack(reg) + reg.unreg = func() error { + m.mtx.Lock() + _ = m.registry.Remove(e) + m.mtx.Unlock() + return nil + } + return reg, nil +} + +type wrapped interface { + unwrap() metric.Observable +} + +func unwrapInstruments(instruments []metric.Observable) []metric.Observable { + out := make([]metric.Observable, 0, len(instruments)) + + for _, inst := range instruments { + if in, ok := inst.(wrapped); ok { + out = append(out, in.unwrap()) + } else { + out = append(out, inst) + } + } + + return out +} + +type registration struct { + embedded.Registration + + instruments []metric.Observable + function metric.Callback + + unreg func() error + unregMu sync.Mutex +} + +func (c *registration) setDelegate(m metric.Meter) { + insts := unwrapInstruments(c.instruments) + + c.unregMu.Lock() + defer c.unregMu.Unlock() + + if c.unreg == nil { + // Unregister already called. + return + } + + reg, err := m.RegisterCallback(c.function, insts...) + if err != nil { + GetErrorHandler().Handle(err) + } + + c.unreg = reg.Unregister +} + +func (c *registration) Unregister() error { + c.unregMu.Lock() + defer c.unregMu.Unlock() + if c.unreg == nil { + // Unregister already called. 
+		return nil
+	}
+
+	var err error
+	err, c.unreg = c.unreg(), nil
+	return err
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 000000000..06bac35c2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set, otherwise it defaults to delegating the
+// calls to the default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+	mtx      sync.Mutex
+	once     sync.Once
+	delegate propagation.TextMapPropagator
+	noop     propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+	return &textMapPropagator{
+		noop: propagation.NewCompositeTextMapPropagator(),
+	}
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once; all subsequent calls
+// perform no delegation.
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+	if delegate == nil {
+		return
+	}
+
+	p.mtx.Lock()
+	p.once.Do(func() { p.delegate = delegate })
+	p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	if p.delegate != nil {
+		return p.delegate
+	}
+	return p.noop
+}
+
+// Inject sets cross-cutting concerns from the Context into the carrier.
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+	p.effectiveDelegate().Inject(ctx, carrier)
+}
+
+// Extract reads cross-cutting concerns from the carrier into a Context.
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
+	return p.effectiveDelegate().Extract(ctx, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (p *textMapPropagator) Fields() []string {
+	return p.effectiveDelegate().Fields()
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
new file mode 100644
index 000000000..7985005bc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+type (
+	tracerProviderHolder struct {
+		tp trace.TracerProvider
+	}
+
+	propagatorsHolder struct {
+		tm propagation.TextMapPropagator
+	}
+
+	meterProviderHolder struct {
+		mp metric.MeterProvider
+	}
+)
+
+var (
+	globalTracer        = defaultTracerValue()
+	globalPropagators   = defaultPropagatorsValue()
+	globalMeterProvider = defaultMeterProvider()
+
+	delegateTraceOnce             sync.Once
+	delegateTextMapPropagatorOnce sync.Once
+	delegateMeterOnce             sync.Once
+)
+
+// TracerProvider is the internal implementation for global.TracerProvider.
+func TracerProvider() trace.TracerProvider {
+	return globalTracer.Load().(tracerProviderHolder).tp
+}
+
+// SetTracerProvider is the internal implementation for global.SetTracerProvider.
+func SetTracerProvider(tp trace.TracerProvider) {
+	current := TracerProvider()
+
+	if _, cOk := current.(*tracerProvider); cOk {
+		if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
+			// Do not assign the default delegating TracerProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in tracer provider"),
+				"Setting tracer provider to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateTraceOnce.Do(func() {
+		if def, ok := current.(*tracerProvider); ok {
+			def.setDelegate(tp)
+		}
+	})
+	globalTracer.Store(tracerProviderHolder{tp: tp})
+}
+
+// TextMapPropagator is the internal implementation for global.TextMapPropagator.
+func TextMapPropagator() propagation.TextMapPropagator {
+	return globalPropagators.Load().(propagatorsHolder).tm
+}
+
+// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
+func SetTextMapPropagator(p propagation.TextMapPropagator) {
+	current := TextMapPropagator()
+
+	if _, cOk := current.(*textMapPropagator); cOk {
+		if _, pOk := p.(*textMapPropagator); pOk && current == p {
+			// Do not assign the default delegating TextMapPropagator to
+			// delegate to itself.
+			Error(
+				errors.New("no delegate configured in text map propagator"),
+				"Setting text map propagator to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	// For the textMapPropagator already returned by TextMapPropagator
+	// delegate to p.
+	delegateTextMapPropagatorOnce.Do(func() {
+		if def, ok := current.(*textMapPropagator); ok {
+			def.SetDelegate(p)
+		}
+	})
+	// Return p when subsequent calls to TextMapPropagator are made.
+	globalPropagators.Store(propagatorsHolder{tm: p})
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+	return globalMeterProvider.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	current := MeterProvider()
+	if _, cOk := current.(*meterProvider); cOk {
+		if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+			// Do not assign the default delegating MeterProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in meter provider"),
+				"Setting meter provider to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateMeterOnce.Do(func() {
+		if def, ok := current.(*meterProvider); ok {
+			def.setDelegate(mp)
+		}
+	})
+	globalMeterProvider.Store(meterProviderHolder{mp: mp})
+}
+
+func defaultTracerValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(tracerProviderHolder{tp: &tracerProvider{}})
+	return v
+}
+
+func defaultPropagatorsValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(propagatorsHolder{tm: newTextMapPropagator()})
+	return v
+}
+
+func defaultMeterProvider() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(meterProviderHolder{mp: &meterProvider{}})
+	return v
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
new file mode 100644
index 000000000..3f61ec12a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior. Meaning, the Span
+remains a no-op Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
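+
+For example (a sketch; ctx and sdkTP, an SDK TracerProvider, are assumed):
+
+	tr := otel.Tracer("app")       // no-op until an SDK is installed
+	otel.SetTracerProvider(sdkTP)  // tr now delegates to the SDK
+	_, span := tr.Start(ctx, "op") // recorded by the SDK Tracer
+	span.End()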
+*/ + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +// tracerProvider is a placeholder for a configured SDK TracerProvider. +// +// All TracerProvider functionality is forwarded to a delegate once +// configured. +type tracerProvider struct { + embedded.TracerProvider + + mtx sync.Mutex + tracers map[il]*tracer + delegate trace.TracerProvider +} + +// Compile-time guarantee that tracerProvider implements the TracerProvider +// interface. +var _ trace.TracerProvider = &tracerProvider{} + +// setDelegate configures p to delegate all TracerProvider functionality to +// provider. +// +// All Tracers provided prior to this function call are switched out to be +// Tracers provided by provider. +// +// It is guaranteed by the caller that this happens only once. +func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.tracers) == 0 { + return + } + + for _, t := range p.tracers { + t.setDelegate(provider) + } + + p.tracers = nil +} + +// Tracer implements TracerProvider. +func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Tracer(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. + + c := trace.NewTracerConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.tracers == nil { + p.tracers = make(map[il]*tracer) + } + + if val, ok := p.tracers[key]; ok { + return val + } + + t := &tracer{name: name, opts: opts, provider: p} + p.tracers[key] = t + return t +} + +type il struct { + name string + version string +} + +// tracer is a placeholder for a trace.Tracer. +// +// All Tracer functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopTracer. +type tracer struct { + embedded.Tracer + + name string + opts []trace.TracerOption + provider *tracerProvider + + delegate atomic.Value +} + +// Compile-time guarantee that tracer implements the trace.Tracer interface. +var _ trace.Tracer = &tracer{} + +// setDelegate configures t to delegate all Tracer functionality to Tracers +// created by provider. +// +// All subsequent calls to the Tracer methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (t *tracer) setDelegate(provider trace.TracerProvider) { + t.delegate.Store(provider.Tracer(t.name, t.opts...)) +} + +// Start implements trace.Tracer by forwarding the call to t.delegate if +// set, otherwise it forwards the call to a NoopTracer. +func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) + } + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. 
+type nonRecordingSpan struct { + embedded.Span + + sc trace.SpanContext + tracer *tracer +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 000000000..e07e79400 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func RawToBool(r uint64) bool { + return r != 0 +} + +func Int64ToRaw(i int64) uint64 { + return uint64(i) +} + +func RawToInt64(r uint64) int64 { + return int64(r) +} + +func Float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func RawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} + +func RawPtrToFloat64Ptr(r *uint64) *float64 { + return (*float64)(unsafe.Pointer(r)) +} + +func RawPtrToInt64Ptr(r *uint64) *int64 { + return (*int64)(unsafe.Pointer(r)) +} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go new file mode 100644 index 000000000..c4f8acd5d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"github.com/go-logr/logr"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// SetLogger configures the logger used internally to opentelemetry.
+func SetLogger(logger logr.Logger) {
+	global.SetLogger(logger)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
new file mode 100644
index 000000000..f95517195
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+)
+
+// Meter returns a Meter from the global MeterProvider. The name must be the
+// name of the library providing instrumentation. This name may be the same as
+// the instrumented code only if that code provides built-in instrumentation.
+// If the name is empty, then an implementation-defined default name will be
+// used instead.
+//
+// If this is called before a global MeterProvider is registered, the returned
+// Meter will be a No-op implementation of a Meter. When a global MeterProvider
+// is registered for the first time, the returned Meter, and all the
+// instruments it has created or will create, are recreated automatically from
+// the new MeterProvider.
+//
+// This is short for GetMeterProvider().Meter(name).
+func Meter(name string, opts ...metric.MeterOption) metric.Meter {
+	return GetMeterProvider().Meter(name, opts...)
+}
+
+// GetMeterProvider returns the registered global meter provider.
+//
+// If no global MeterProvider has been registered, a No-op MeterProvider
+// implementation is returned. When a global MeterProvider is registered for
+// the first time, the returned MeterProvider, and all the Meters it has
+// created or will create, are recreated automatically from the new
+// MeterProvider.
+func GetMeterProvider() metric.MeterProvider {
+	return global.MeterProvider()
+}
+
+// SetMeterProvider registers mp as the global MeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	global.SetMeterProvider(mp)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go new file mode 100644 index 000000000..072baa8e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -0,0 +1,271 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Float64Observable describes a set of instruments used asynchronously to +// record float64 measurements once per collection cycle. Observations of +// these instruments are only made within a callback. +// +// Warning: Methods may be added to this interface in minor releases. +type Float64Observable interface { + Observable + + float64Observable() +} + +// Float64ObservableCounter is an instrument used to asynchronously record +// increasing float64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for +// unimplemented methods. +type Float64ObservableCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableCounter + + Float64Observable +} + +// Float64ObservableCounterConfig contains options for asynchronous counter +// instruments that record int64 values. +type Float64ObservableCounterConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableCounterConfig returns a new +// [Float64ObservableCounterConfig] with all opts applied. +func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig { + var config Float64ObservableCounterConfig + for _, o := range opts { + config = o.applyFloat64ObservableCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableCounterOption applies options to a +// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableCounterOption. 
+type Float64ObservableCounterOption interface { + applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig +} + +// Float64ObservableUpDownCounter is an instrument used to asynchronously +// record float64 measurements once per collection cycle. Observations are only +// made within a callback for this instrument. The value observed is assumed +// the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64ObservableUpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableUpDownCounter + + Float64Observable +} + +// Float64ObservableUpDownCounterConfig contains options for asynchronous +// counter instruments that record int64 values. +type Float64ObservableUpDownCounterConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableUpDownCounterConfig returns a new +// [Float64ObservableUpDownCounterConfig] with all opts applied. +func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { + var config Float64ObservableUpDownCounterConfig + for _, o := range opts { + config = o.applyFloat64ObservableUpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableUpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableUpDownCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableUpDownCounterOption applies options to a +// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableUpDownCounterOption. +type Float64ObservableUpDownCounterOption interface { + applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig +} + +// Float64ObservableGauge is an instrument used to asynchronously record +// instantaneous float64 measurements once per collection cycle. Observations +// are only made within a callback for this instrument. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64ObservableGauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableGauge + + Float64Observable +} + +// Float64ObservableGaugeConfig contains options for asynchronous counter +// instruments that record int64 values. +type Float64ObservableGaugeConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig] +// with all opts applied. 
+func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
+	var config Float64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableGaugeOption applies options to a
+// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableGaugeOption.
+type Float64ObservableGaugeOption interface {
+	applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
+}
+
+// Float64Observer is a recorder of float64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Observer
+
+	// Observe records the float64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value float64, options ...ObserveOption)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for a Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Float64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObservableOption applies options to float64 Observer instruments.
+type Float64ObservableOption interface {
+	Float64ObservableCounterOption
+	Float64ObservableUpDownCounterOption
+	Float64ObservableGaugeOption
+}
+
+type float64CallbackOpt struct {
+	cback Float64Callback
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
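The callback plumbing above is easiest to see end to end. A minimal sketch, assuming only the API in this file; the readCacheHitRatio helper is a hypothetical stand-in for application code:

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel/attribute"
    	"go.opentelemetry.io/otel/metric"
    )

    // readCacheHitRatio is a hypothetical measurement source.
    func readCacheHitRatio() float64 { return 0.97 }

    func main() {
    	cb := metric.Float64Callback(func(_ context.Context, o metric.Float64Observer) error {
    		o.Observe(readCacheHitRatio(), metric.WithAttributes(attribute.String("cache", "page")))
    		return nil
    	})
    	// WithFloat64Callback is a Float64ObservableOption, so the same callback
    	// can configure a counter, up-down counter, or gauge.
    	cfg := metric.NewFloat64ObservableGaugeConfig(metric.WithFloat64Callback(cb))
    	_ = cfg.Callbacks() // holds cb; an SDK would invoke it once per collection cycle
    }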
+
+// WithFloat64Callback adds the callback to be called for an instrument.
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
+	return float64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
new file mode 100644
index 000000000..9bd6ebf02
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -0,0 +1,269 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Int64Observable interface {
+	Observable
+
+	int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableCounter
+
+	Int64Observable
+}
+
+// Int64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
+// with all opts applied.
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
+	var config Int64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableCounterOption applies options to a
+// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableCounterOption.
+type Int64ObservableCounterOption interface {
+	applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
+}
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed to
+// be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableUpDownCounter
+
+	Int64Observable
+}
+
+// Int64ObservableUpDownCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableUpDownCounterConfig returns a new
+// [Int64ObservableUpDownCounterConfig] with all opts applied.
+func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
+	var config Int64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableUpDownCounterOption applies options to a
+// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableUpDownCounterOption.
+type Int64ObservableUpDownCounterOption interface {
+	applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
+}
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableGauge
+
+	Int64Observable
+}
+
+// Int64ObservableGaugeConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
+// with all opts applied.
+func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
+	var config Int64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableGaugeOption applies options to a
+// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableGaugeOption.
+type Int64ObservableGaugeOption interface {
+	applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
+}
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Observer
+
+	// Observe records the int64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value int64, options ...ObserveOption)
+}
+
+// Int64Callback is a function registered with a Meter that makes observations
+// for an Int64Observable instrument it is registered with. Calls to the
+// Int64Observer record measurement values for the Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Int64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObservableOption applies options to int64 Observer instruments.
+type Int64ObservableOption interface {
+	Int64ObservableCounterOption
+	Int64ObservableUpDownCounterOption
+	Int64ObservableGaugeOption
+}
+
+type int64CallbackOpt struct {
+	cback Int64Callback
+}
+
+func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithInt64Callback adds the callback to be called for an instrument.
+func WithInt64Callback(callback Int64Callback) Int64ObservableOption { + return int64CallbackOpt{callback} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go new file mode 100644 index 000000000..778ad2d74 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// MeterConfig contains options for Meters. +type MeterConfig struct { + instrumentationVersion string + schemaURL string + attrs attribute.Set + + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation. +func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + +// SchemaURL is the schema_url of the library providing instrumentation. +func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +} + +// MeterOption is an interface for applying Meter options. +type MeterOption interface { + // applyMeter is used to set a MeterOption value of a MeterConfig. + applyMeter(MeterConfig) MeterConfig +} + +// NewMeterConfig creates a new MeterConfig and applies +// all the given options. +func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. 
+func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 000000000..54716e13b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,170 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package metric provides the OpenTelemetry API used to measure metrics about +source code operation. + +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official +OpenTelemetry implementation of this API. + +All measurements made with this package are made via instruments. These +instruments are created by a [Meter] which itself is created by a +[MeterProvider]. Applications need to accept a [MeterProvider] implementation +as a starting point when instrumenting. This can be done directly, or by using +the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an +appropriately named [Meter] from the accepted [MeterProvider], instrumentation +can then be built from the [Meter]'s instruments. + +# Instruments + +Each instrument is designed to make measurements of a particular type. Broadly, +all instruments fall into two overlapping logical categories: asynchronous or +synchronous, and int64 or float64. + +All synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are used to measure the operation and performance of source +code during the source code execution. These instruments only make measurements +when the source code they instrument is run. + +All asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) are used to measure metrics outside of the execution +of source code. They are said to make "observations" via a callback function +called once every measurement collection cycle. + +Each instrument is also grouped by the value type it measures. Either int64 or +float64. The value being measured will dictate which instrument in these +categories to use. + +Outside of these two broad categories, instruments are described by the +function they are designed to serve. All Counters ([Int64Counter], +[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are +designed to measure values that never decrease in value, but instead only +incrementally increase in value. UpDownCounters ([Int64UpDownCounter], +[Float64UpDownCounter], [Int64ObservableUpDownCounter], and +[Float64ObservableUpDownCounter]) on the other hand, are designed to measure +values that can increase and decrease. 
When more information needs to be
+conveyed about all the synchronous measurements made during a collection cycle,
+a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally,
+when just the most recent measurement needs to be conveyed about an
+asynchronous measurement, a Gauge ([Int64ObservableGauge] and
+[Float64ObservableGauge]) should be used.
+
+See the [OpenTelemetry documentation] for more information about instruments
+and their intended use.
+
+# Measurements
+
+Measurements are made by recording values and information about the values with
+an instrument. How these measurements are recorded depends on the instrument.
+
+Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are recorded using the instrument methods directly. All
+counter instruments have an Add method that is used to measure an increment
+value, and all histogram instruments have a Record method to measure a data
+point.
+
+Asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) record measurements within a callback function. The
+callback is registered with the Meter which ensures the callback is called once
+per collection cycle. A callback can be registered two ways: during the
+instrument's creation using an option, or later using the RegisterCallback
+method of the [Meter] that created the instrument.
+
+If the following criteria are met, an option ([WithInt64Callback] or
+[WithFloat64Callback]) can be used during the asynchronous instrument's
+creation to register a callback ([Int64Callback] or [Float64Callback],
+respectively):
+
+  - The measurement process is known when the instrument is created
+  - Only that instrument will make a measurement within the callback
+  - The callback never needs to be unregistered
+
+If the criteria are not met, use the RegisterCallback method of the [Meter] that
+created the instrument to register a [Callback].
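Both registration styles look like this in practice; a sketch using the no-op provider from [go.opentelemetry.io/otel/metric/noop] purely so the snippet compiles standalone (a real application would use an SDK MeterProvider), with a hard-coded 42 standing in for an actual measurement:

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel/metric"
    	"go.opentelemetry.io/otel/metric/noop"
    )

    func main() {
    	meter := noop.NewMeterProvider().Meter("example.com/instrumentation")

    	// Option style: the callback is bound at creation time.
    	g, _ := meter.Int64ObservableGauge("goroutines",
    		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
    			o.Observe(42) // placeholder measurement
    			return nil
    		}))

    	// RegisterCallback style: can observe multiple instruments and be unregistered.
    	reg, _ := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
    		o.ObserveInt64(g, 42) // placeholder measurement
    		return nil
    	}, g)
    	defer func() { _ = reg.Unregister() }()
    }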
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/metric/embedded"
+
+	type MeterProvider struct {
+		embedded.MeterProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to be a panic,
+they need to embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/metric"
+
+	type MeterProvider struct {
+		metric.MeterProvider
+		// ...
+	}
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/metric].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/metric/noop]:
+
+	import "go.opentelemetry.io/otel/metric/noop"
+
+	type MeterProvider struct {
+		noop.MeterProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+
+[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
+[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
+*/
+package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
new file mode 100644
index 000000000..ae0bdbd2e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -0,0 +1,234 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// metric API].
+//
+// Implementers of the [OpenTelemetry metric API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry metric API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
+package embedded // import "go.opentelemetry.io/otel/metric/embedded"
+
+// MeterProvider is embedded in
+// [go.opentelemetry.io/otel/metric.MeterProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type MeterProvider interface{ meterProvider() }
+
+// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Meter interface{ meter() } + +// Float64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Float64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Observer] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Observer interface{ float64Observer() } + +// Int64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Int64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Observer interface{ int64Observer() } + +// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Observer] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Observer interface{ observer() } + +// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Registration] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Registration] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Registration interface{ registration() } + +// Float64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Float64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Counter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Counter interface{ float64Counter() } + +// Float64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Float64Histogram]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Histogram interface{ float64Histogram() } + +// Float64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableCounter interface{ float64ObservableCounter() } + +// Float64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableGauge interface{ float64ObservableGauge() } + +// Float64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } + +// Float64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Float64UpDownCounter interface{ float64UpDownCounter() } + +// Int64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Int64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Counter interface{ int64Counter() } + +// Int64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Int64Histogram]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Histogram interface{ int64Histogram() } + +// Int64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableCounter interface{ int64ObservableCounter() } + +// Int64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Int64ObservableGauge interface{ int64ObservableGauge() } + +// Int64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if +// you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } + +// Int64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go new file mode 100644 index 000000000..be89cd533 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -0,0 +1,357 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// Observable is used as a grouping mechanism for all instruments that are +// updated within a Callback. +type Observable interface { + observable() +} + +// InstrumentOption applies options to all instruments. +type InstrumentOption interface { + Int64CounterOption + Int64UpDownCounterOption + Int64HistogramOption + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption + + Float64CounterOption + Float64UpDownCounterOption + Float64HistogramOption + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +// HistogramOption applies options to histogram instruments. +type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + +type descOpt string + +func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. 
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
+
+type unitOpt string
+
+func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+// WithUnit sets the instrument unit.
+//
+// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
+func WithUnit(u string) InstrumentOption { return unitOpt(u) }
+
+// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
+//
+// This option is considered "advisory", and may be ignored by API implementations.
+func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
+
+type bucketOpt []float64
+
+func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+// AddOption applies options to an addition measurement. See
+// [MeasurementOption] for other options that can be used as an AddOption.
+type AddOption interface {
+	applyAdd(AddConfig) AddConfig
+}
+
+// AddConfig contains options for an addition measurement.
+type AddConfig struct {
+	attrs attribute.Set
+}
+
+// NewAddConfig returns a new [AddConfig] with all opts applied.
+func NewAddConfig(opts []AddOption) AddConfig {
+	config := AddConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyAdd(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c AddConfig) Attributes() attribute.Set {
+	return c.attrs
+}
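How an SDK might resolve the options passed to an Add call; a small sketch showing that repeated WithAttributes options are merged into one set:

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/otel/attribute"
    	"go.opentelemetry.io/otel/metric"
    )

    func main() {
    	cfg := metric.NewAddConfig([]metric.AddOption{
    		metric.WithAttributes(attribute.String("peer", "db")),
    		metric.WithAttributes(attribute.String("op", "query")),
    	})
    	// Both key-value pairs survive the merge performed by attrOpt.applyAdd.
    	fmt.Println(cfg.Attributes().Len()) // 2
    }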
+
+// RecordOption applies options to a recorded measurement. See
+// [MeasurementOption] for other options that can be used as a RecordOption.
+type RecordOption interface {
+	applyRecord(RecordConfig) RecordConfig
+}
+
+// RecordConfig contains options for a recorded measurement.
+type RecordConfig struct {
+	attrs attribute.Set
+}
+
+// NewRecordConfig returns a new [RecordConfig] with all opts applied.
+func NewRecordConfig(opts []RecordOption) RecordConfig {
+	config := RecordConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyRecord(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c RecordConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// ObserveOption applies options to an observed measurement. See
+// [MeasurementOption] for other options that can be used as an ObserveOption.
+type ObserveOption interface {
+	applyObserve(ObserveConfig) ObserveConfig
+}
+
+// ObserveConfig contains options for an observed measurement.
+type ObserveConfig struct {
+	attrs attribute.Set
+}
+
+// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
+func NewObserveConfig(opts []ObserveOption) ObserveConfig {
+	config := ObserveConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyObserve(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c ObserveConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// MeasurementOption applies options to all instrument measurements.
+type MeasurementOption interface {
+	AddOption
+	RecordOption
+	ObserveOption
+}
+
+type attrOpt struct {
+	set attribute.Set
+}
+
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+	// NewMergeIterator uses the first value for any duplicates.
+	iter := attribute.NewMergeIterator(&b, &a)
+	merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+	for iter.Next() {
+		merged = append(merged, iter.Attribute())
+	}
+	return attribute.NewSet(merged...)
+}
+
+func (o attrOpt) applyAdd(c AddConfig) AddConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+// WithAttributeSet sets the attribute Set associated with a measurement.
+//
+// If multiple WithAttributeSet or WithAttributes options are passed the
+// attributes will be merged together in the order they are passed. Attributes
+// with duplicate keys will use the last value passed.
+func WithAttributeSet(attributes attribute.Set) MeasurementOption {
+	return attrOpt{set: attributes}
+}
+
+// WithAttributes converts attributes into an attribute Set and sets the Set to
+// be associated with a measurement. This is shorthand for:
+//
+//	cp := make([]attribute.KeyValue, len(attributes))
+//	copy(cp, attributes)
+//	WithAttributeSet(attribute.NewSet(cp...))
+//
+// [attribute.NewSet] may modify the passed attributes so this will make a copy
+// of attributes before creating a set in order to ensure this function is
+// concurrent safe. This makes this option function less optimized in
+// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
+// preferred for performance sensitive code.
+// +// See [WithAttributeSet] for information about how multiple WithAttributes are +// merged. +func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption { + cp := make([]attribute.KeyValue, len(attributes)) + copy(cp, attributes) + return attrOpt{set: attribute.NewSet(cp...)} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go new file mode 100644 index 000000000..2520bc74a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -0,0 +1,212 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// MeterProvider provides access to named Meter instances, for instrumenting +// an application or package. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type MeterProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.MeterProvider + + // Meter returns a new Meter with the provided name and configuration. + // + // A Meter should be scoped at most to a single package. The name needs to + // be unique so it does not collide with other names used by + // an application, nor other applications. To achieve this, the import path + // of the instrumentation package is recommended to be used as name. + // + // If the name is empty, then an implementation defined default name will + // be used instead. + Meter(name string, opts ...MeterOption) Meter +} + +// Meter provides access to instrument instances for recording metrics. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Meter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Meter + + // Int64Counter returns a new Int64Counter instrument identified by name + // and configured with options. The instrument is used to synchronously + // record increasing int64 measurements during a computational operation. + Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument + // identified by name and configured with options. The instrument is used + // to synchronously record int64 measurements during a computational + // operation. 
+	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+	// Int64Histogram returns a new Int64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of int64 measurements during a
+	// computational operation.
+	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+	// Int64ObservableCounter returns a new Int64ObservableCounter identified
+	// by name and configured with options. The instrument is used to
+	// asynchronously record increasing int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
+
+	// Float64Counter returns a new Float64Counter instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record increasing float64 measurements during a
+	// computational operation.
+	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record float64 measurements during a computational
+	// operation.
+	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+	// Float64Histogram returns a new Float64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of float64 measurements during a
+	// computational operation.
+	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+	// Float64ObservableCounter returns a new Float64ObservableCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record increasing float64
+	// measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+	// Float64ObservableUpDownCounter returns a new
+	// Float64ObservableUpDownCounter instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// float64 measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous float64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
+
+	// RegisterCallback registers f to be called during the collection of a
+	// measurement cycle.
+	//
+	// If Unregister of the returned Registration is called, f needs to be
+	// unregistered and not called during collection.
+	//
+	// The instruments f is registered with are the only instruments that f may
+	// observe values for.
+	//
+	// If no instruments are passed, f should not be registered nor called
+	// during collection.
+	//
+	// The function f needs to be concurrent safe.
+	RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. Meaning, it should not report measurements for an instrument with
+// the same attributes as another Callback will report.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
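For the synchronous side of this interface, instrument creation and recording compose as below; a sketch that again leans on the no-op provider so it compiles standalone (instrument names and values are illustrative):

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel/attribute"
    	"go.opentelemetry.io/otel/metric"
    	"go.opentelemetry.io/otel/metric/noop"
    )

    func main() {
    	meter := noop.NewMeterProvider().Meter("example.com/instrumentation")

    	dur, _ := meter.Float64Histogram("request.duration",
    		metric.WithUnit("s"),
    		metric.WithDescription("end-to-end request latency"))

    	// Pre-building the attribute set suits hot paths (see WithAttributeSet above).
    	attrs := attribute.NewSet(attribute.String("route", "/items"))
    	dur.Record(context.Background(), 0.042, metric.WithAttributeSet(attrs))
    }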
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Observer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Observer
+
+ // ObserveFloat64 records the float64 value for obsrv.
+ ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+ // ObserveInt64 records the int64 value for obsrv.
+ ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Registration
+
+ // Unregister removes the callback registration from a Meter.
+ //
+ // This method needs to be idempotent and concurrent safe.
+ Unregister() error
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
new file mode 100644
index 000000000..0a4825ae6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -0,0 +1,185 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Counter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Counter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64CounterConfig contains options for synchronous counter instruments that
+// record float64 values.
+type Float64CounterConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
+// applied.
+func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
+ var config Float64CounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64Counter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64CounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64CounterConfig) Unit() string {
+ return c.unit
+}
+
+// Float64CounterOption applies options to a [Float64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64CounterOption.
+type Float64CounterOption interface {
+ applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64UpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64UpDownCounter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64UpDownCounterConfig contains options for synchronous counter
+// instruments that record float64 values.
+type Float64UpDownCounterConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
+// with all opts applied.
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
+ var config Float64UpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64UpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64UpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64UpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Float64UpDownCounterOption applies options to a
+// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
+// can be used as a Float64UpDownCounterOption.
+type Float64UpDownCounterOption interface {
+ applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
+}
+
+// Float64Histogram is an instrument that records a distribution of float64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Histogram interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Histogram
+
+ // Record adds an additional value to the distribution.
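+ //
+ // An illustrative sketch (editorial note; the attribute key and value
+ // are hypothetical):
+ //
+ //	hist.Record(ctx, 0.25,
+ //		metric.WithAttributes(attribute.String("http.route", "/items")))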
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, incr float64, options ...RecordOption)
+}
+
+// Float64HistogramConfig contains options for synchronous histogram instruments
+// that record float64 values.
+type Float64HistogramConfig struct {
+ description string
+ unit string
+ explicitBucketBoundaries []float64
+}
+
+// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
+// opts applied.
+func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
+ var config Float64HistogramConfig
+ for _, o := range opts {
+ config = o.applyFloat64Histogram(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64HistogramConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64HistogramConfig) Unit() string {
+ return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+ return c.explicitBucketBoundaries
+}
+
+// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64HistogramOption.
+type Float64HistogramOption interface {
+ applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
new file mode 100644
index 000000000..56667d32f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -0,0 +1,185 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Counter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Counter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64CounterConfig contains options for synchronous counter instruments that
+// record int64 values.
+type Int64CounterConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
+// applied.
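+//
+// A minimal illustrative sketch (editorial note) of applying and reading
+// options; the description and unit values are hypothetical:
+//
+//	cfg := metric.NewInt64CounterConfig(
+//		metric.WithDescription("requests served"),
+//		metric.WithUnit("{request}"),
+//	)
+//	_ = cfg.Description() // "requests served"
+//	_ = cfg.Unit()        // "{request}"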
+func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
+ var config Int64CounterConfig
+ for _, o := range opts {
+ config = o.applyInt64Counter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64CounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64CounterConfig) Unit() string {
+ return c.unit
+}
+
+// Int64CounterOption applies options to a [Int64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64CounterOption.
+type Int64CounterOption interface {
+ applyInt64Counter(Int64CounterConfig) Int64CounterConfig
+}
+
+// Int64UpDownCounter is an instrument that records increasing or decreasing
+// int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64UpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64UpDownCounter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64UpDownCounterConfig contains options for synchronous counter
+// instruments that record int64 values.
+type Int64UpDownCounterConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
+// all opts applied.
+func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
+ var config Int64UpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyInt64UpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64UpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64UpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig].
+// See [InstrumentOption] for other options that can be used as an
+// Int64UpDownCounterOption.
+type Int64UpDownCounterOption interface {
+ applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
+}
+
+// Int64Histogram is an instrument that records a distribution of int64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Histogram interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Histogram
+
+ // Record adds an additional value to the distribution.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, incr int64, options ...RecordOption)
+}
+
+// Int64HistogramConfig contains options for synchronous histogram instruments
+// that record int64 values.
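+//
+// For example (editorial sketch; the boundary values are hypothetical,
+// assuming this package's WithExplicitBucketBoundaries option):
+//
+//	cfg := metric.NewInt64HistogramConfig(
+//		metric.WithExplicitBucketBoundaries(1, 10, 100),
+//	)
+//	_ = cfg.ExplicitBucketBoundaries() // [1 10 100]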
+type Int64HistogramConfig struct { + description string + unit string + explicitBucketBoundaries []float64 +} + +// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts +// applied. +func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig { + var config Int64HistogramConfig + for _, o := range opts { + config = o.applyInt64Histogram(config) + } + return config +} + +// Description returns the configured description. +func (c Int64HistogramConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64HistogramConfig) Unit() string { + return c.unit +} + +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + +// Int64HistogramOption applies options to a [Int64HistogramConfig]. See +// [InstrumentOption] for other options that can be used as an +// Int64HistogramOption. +type Int64HistogramOption interface { + applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig +} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go new file mode 100644 index 000000000..d29aaa32c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/propagation" +) + +// GetTextMapPropagator returns the global TextMapPropagator. If none has been +// set, a No-Op TextMapPropagator is returned. +func GetTextMapPropagator() propagation.TextMapPropagator { + return global.TextMapPropagator() +} + +// SetTextMapPropagator sets propagator as the global TextMapPropagator. +func SetTextMapPropagator(propagator propagation.TextMapPropagator) { + global.SetTextMapPropagator(propagator) +} diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go new file mode 100644 index 000000000..303cdf1cb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+ bStr := baggage.FromContext(ctx).String()
+ if bStr != "" {
+ carrier.Set(baggageHeader, bStr)
+ }
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+ bStr := carrier.Get(baggageHeader)
+ if bStr == "" {
+ return parent
+ }
+
+ bag, err := baggage.Parse(bStr)
+ if err != nil {
+ return parent
+ }
+ return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+ return []string{baggageHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 000000000..c119eb285
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 000000000..c94438f73
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+ "context"
+ "net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Get returns the value associated with the passed key.
+ Get(key string) string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Set stores the key-value pair.
+ Set(key string, value string)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Keys lists the keys stored in this carrier.
+ Keys() []string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile time check that MapCarrier implements the TextMapCarrier.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+ return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+ c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+ keys := make([]string, 0, len(c))
+ for k := range c {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+ return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+ http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+ keys := make([]string, 0, len(hc))
+ for k := range hc {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Inject sets cross-cutting concerns from the Context into the carrier.
+ Inject(ctx context.Context, carrier TextMapCarrier)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Extract reads cross-cutting concerns from the carrier into a Context.
+ Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Fields returns the keys whose values are set with Inject.
+ Fields() []string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
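+
+ // An illustrative usage sketch (editorial note, not upstream
+ // documentation), carrying context across an HTTP request with the
+ // HeaderCarrier defined above; req is a hypothetical *http.Request:
+ //
+ //	prop := otel.GetTextMapPropagator()
+ //	prop.Inject(ctx, propagation.HeaderCarrier(req.Header))
+ //	// ...and on the receiving side:
+ //	ctx = prop.Extract(req.Context(), propagation.HeaderCarrier(req.Header))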
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+ for _, i := range p {
+ i.Inject(ctx, carrier)
+ }
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+ for _, i := range p {
+ ctx = i.Extract(ctx, carrier)
+ }
+ return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+ unique := make(map[string]struct{})
+ for _, i := range p {
+ for _, k := range i.Fields() {
+ unique[k] = struct{}{}
+ }
+ }
+
+ fields := make([]string, 0, len(unique))
+ for k := range unique {
+ fields = append(fields, k)
+ }
+ return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagator. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+ return compositeTextMapPropagator(p)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 000000000..63e5d6222
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,167 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "strings"
+
+ "go.opentelemetry.io/otel/trace"
+)
+
+const (
+ supportedVersion = 0
+ maxVersion = 254
+ traceparentHeader = "traceparent"
+ tracestateHeader = "tracestate"
+ delimiter = "-"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/)
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var (
+ _ TextMapPropagator = TraceContext{}
+ versionPart = fmt.Sprintf("%.2X", supportedVersion)
+)
+
+// Inject sets the tracecontext from the Context into the carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+ sc := trace.SpanContextFromContext(ctx)
+ if !sc.IsValid() {
+ return
+ }
+
+ if ts := sc.TraceState().String(); ts != "" {
+ carrier.Set(tracestateHeader, ts)
+ }
+
+ // Clear all flags other than the trace-context supported sampling bit.
+ flags := sc.TraceFlags() & trace.FlagsSampled
+
+ var sb strings.Builder
+ sb.Grow(2 + 32 + 16 + 2 + 3)
+ _, _ = sb.WriteString(versionPart)
+ traceID := sc.TraceID()
+ spanID := sc.SpanID()
+ flagByte := [1]byte{byte(flags)}
+ var buf [32]byte
+ for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+ _ = sb.WriteByte(delimiter[0])
+ n := hex.Encode(buf[:], src)
+ _, _ = sb.Write(buf[:n])
+ }
+ carrier.Set(traceparentHeader, sb.String())
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+ sc := tc.extract(carrier)
+ if !sc.IsValid() {
+ return ctx
+ }
+ return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+ h := carrier.Get(traceparentHeader)
+ if h == "" {
+ return trace.SpanContext{}
+ }
+
+ var ver [1]byte
+ if !extractPart(ver[:], &h, 2) {
+ return trace.SpanContext{}
+ }
+ version := int(ver[0])
+ if version > maxVersion {
+ return trace.SpanContext{}
+ }
+
+ var scc trace.SpanContextConfig
+ if !extractPart(scc.TraceID[:], &h, 32) {
+ return trace.SpanContext{}
+ }
+ if !extractPart(scc.SpanID[:], &h, 16) {
+ return trace.SpanContext{}
+ }
+
+ var opts [1]byte
+ if !extractPart(opts[:], &h, 2) {
+ return trace.SpanContext{}
+ }
+ if version == 0 && (h != "" || opts[0] > 2) {
+ // version 0 does not allow any trailing data after the flags, and
+ // does not allow flag bits other than the sampled bit.
+ return trace.SpanContext{}
+ }
+
+ // Clear all flags other than the trace-context supported sampling bit.
+ scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+ // Ignore the error returned here. Failure to parse tracestate MUST NOT
+ // affect the parsing of traceparent according to the W3C tracecontext
+ // specification.
+ scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+ scc.Remote = true
+
+ sc := trace.NewSpanContext(scc)
+ if !sc.IsValid() {
+ return trace.SpanContext{}
+ }
+
+ return sc
+}
+
+// upperHex reports whether v contains upper-case hexadecimal characters.
+func upperHex(v string) bool {
+ for _, c := range v {
+ if c >= 'A' && c <= 'F' {
+ return true
+ }
+ }
+ return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+ part, left, _ := strings.Cut(*h, delimiter)
+ *h = left
+ // hex.Decode accepts upper-case characters, which are not valid here, so
+ // exclude them explicitly.
+ if len(part) != n || upperHex(part) {
+ return false
+ }
+ if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+ return false
+ }
+ return true
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (tc TraceContext) Fields() []string { + return []string{traceparentHeader, tracestateHeader} +} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt new file mode 100644 index 000000000..e0a43e138 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -0,0 +1 @@ +codespell==2.2.6 diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go new file mode 100644 index 000000000..71a1f7748 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.17.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go new file mode 100644 index 000000000..679c40c4d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It SHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or
+ // sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It MUST be calculated as two
+ // different counters starting from `1`, one for sent messages and one for
+ // received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different
+// counters starting from `1`, one for sent messages and one for received
+// messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It SHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
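+//
+// An illustrative sketch (editorial note); span and err are hypothetical,
+// and trace.WithAttributes comes from go.opentelemetry.io/otel/trace:
+//
+//	span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
+//	span.End()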
+func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go new file mode 100644 index 000000000..9b8c559de --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go new file mode 100644 index 000000000..d5c4b5c13 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go new file mode 100644 index 000000000..39a2eab3a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -0,0 +1,2010 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. 
The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. It represents the full user-agent +// string provided by the browser +func BrowserUserAgent(val string) attribute.KeyValue { + return BrowserUserAgentKey.String(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. 
It represents the
+ // availability zone where the resource is running. Cloud regions often
+ // have multiple, isolated locations known as zones to increase
+ // availability.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. 
+const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). 
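The log group/stream helpers above are variadic and produce string-slice attributes, one element per group or stream. A sketch, separate from the diff, with the package name hypothetical and the ARNs taken from the Examples fields:

package awsinfo

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// cloudWatchAttrs returns the CloudWatch log identity of this process
// as string-slice attributes.
func cloudWatchAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSLogGroupNames("/aws/lambda/my-function"),
		semconv.AWSLogGroupARNs("arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"),
		semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
	}
}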
+func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. 
+func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. 
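A quick sketch of the container and deployment helpers defined above (not part of the vendored file; package name hypothetical, attribute values lifted from the Examples fields):

package containerinfo

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// containerAttrs describes the container this process runs in plus the
// deployment tier it belongs to.
func containerAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerRuntime("docker"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTag("0.1"),
		semconv.DeploymentEnvironment("staging"),
	}
}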
It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic + // conventions. It represents the unique ID of the single function that + // this runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so consider setting `faas.id` as a span attribute instead.
+ // + // The exact value to use for `faas.id` depends on the cloud provider: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function in MiB. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // Linux systems, the `machine-id` located in `/etc/machine-id` or + // `/var/lib/dbus/machine-id` may be used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID. For Cloud, this + // value is from the provider. 
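The FaaS helpers above compose the same way. A sketch, separate from the diff (package name hypothetical; the values are the Examples from the doc comments):

package faasinfo

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// lambdaAttrs describes a single Lambda runtime instance; note the
// int-typed constructor for faas.max_memory.
func lambdaAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.FaaSName("my-function"),
		semconv.FaaSID("arn:aws:lambda:us-west-2:123456789012:function:my-function"),
		semconv.FaaSVersion("26"),
		semconv.FaaSInstance("2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"),
		semconv.FaaSMaxMemory(128),
	}
}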
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized Linux +// systems, the `machine-id` located in `/etc/machine-id` or +// `/var/lib/dbus/machine-id` may be used. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. 
+func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). 
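The cluster, node, namespace, and pod helpers above together pin down a workload's Kubernetes identity. A sketch (not part of the vendored file; package name hypothetical, values from the Examples fields):

package k8sinfo

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// podAttrs identifies the pod this process runs in, from cluster down
// to pod UID.
func podAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNodeName("node-1"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
	}
}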
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
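The OS attributes mix a required enum (os.type) with free-form strings. A sketch, separate from the diff (package name hypothetical, values from the Examples fields):

package osinfo

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// osAttrs pairs the required os.type enum member with the optional
// human-readable name, version, and description.
func osAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.OSTypeLinux,
		semconv.OSName("Ubuntu"),
		semconv.OSVersion("18.04.1"),
		semconv.OSDescription("Ubuntu 18.04.1 LTS"),
	}
}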
+ // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. 
It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. 
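For the process and runtime attributes, a Go program can fill most values from the standard library. A sketch, not part of the vendored file (package name hypothetical):

package procinfo

import (
	"os"
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// processAttrs describes the live process; for a Go binary the runtime
// version comes straight from the runtime package.
func processAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessCommandArgs(os.Args...),
		semconv.ProcessRuntimeName("go"),
		semconv.ProcessRuntimeVersion(runtime.Version()),
	}
}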
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. 
instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. 
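The service and telemetry-SDK attributes are the core of a resource's identity; service.name is the one required key in this group. A sketch, separate from the diff (package name hypothetical, values from the Examples fields):

package svcinfo

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// serviceAttrs gives the service a stable identity; the
// namespace/name/instance.id triplet is meant to be globally unique.
func serviceAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
		semconv.ServiceVersion("2.0.0"),
		semconv.TelemetrySDKLanguageGo,
	}
}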
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. 
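+// For example (editor's illustrative note), WebEngineName("WildFly")
+// produces the attribute webengine.name="WildFly", matching the documented
+// example above.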
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OtelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OtelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OtelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OtelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OtelScopeName(val string) attribute.KeyValue {
+ return OtelScopeNameKey.String(val)
+}
+
+// OtelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OtelScopeVersion(val string) attribute.KeyValue {
+ return OtelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OtelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It is deprecated; use the
+ // `otel.scope.name` attribute instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OtelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It is deprecated; use the
+ // `otel.scope.version` attribute instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OtelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OtelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It is deprecated; use the
+// `otel.scope.name` attribute instead.
+func OtelLibraryName(val string) attribute.KeyValue {
+ return OtelLibraryNameKey.String(val)
+}
+
+// OtelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It is deprecated; use the
+// `otel.scope.version` attribute instead.
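+// Editor's note (illustrative): new instrumentation should prefer the
+// OtelScopeName/OtelScopeVersion helpers above; these deprecated keys are
+// kept only for backwards compatibility with older exporters.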
+func OtelLibraryVersion(val string) attribute.KeyValue {
+ return OtelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
new file mode 100644
index 000000000..42fc525d1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
new file mode 100644
index 000000000..8c4a7299d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -0,0 +1,3375 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions.
It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the
+ // business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` response, where applicable).
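+ //
+ // Illustrative usage (editor's sketch; `span` is an assumed active
+ // trace.Span, the ARN is the documented example value):
+ //
+ //     span.SetAttributes(AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"))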
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `faas.id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+ // that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+ // that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+ // that contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
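+ //
+ // Editor's illustrative sketch (assumes an active span named `span`;
+ // values are the documented examples):
+ //
+ //     span.SetAttributes(
+ //         CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+ //         CloudeventsEventSubject("mynewfile.jpg"),
+ //     )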
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
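+ //
+ // Illustrative usage (editor's sketch; `span` is an assumed active
+ // trace.Span, values taken from the documented examples):
+ //
+ //     span.SetAttributes(DBSystemPostgreSQL, DBName("customers"),
+ //         DBStatement("SELECT * FROM wuser_table"))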
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable and not
+ // explicitly disabled via instrumentation configuration.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ // Note: The value may be sanitized to exclude sensitive information.
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented.
If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// DBConnectionString returns 
an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target database
+// (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // being connected to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
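+ //
+ // e.g. (editor's illustrative note) DBCassandraPageSize(5000) records
+ // db.cassandra.page_size=5000 on the span it is applied to.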
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
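+ //
+ // Editor's illustrative sketch (assumes an active span named `span`):
+ //
+ //     span.SetAttributes(DBCassandraCoordinatorDC("us-west-2"),
+ //         DBCassandraConsistencyLevelLocalQuorum)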
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions.
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
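+ //
+ // Illustrative (editor's note): an exporter mapping a span with an error
+ // status might emit OtelStatusCodeError together with
+ // OtelStatusDescription("resource not found").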
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OtelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OtelStatusCodeOk = OtelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OtelStatusCodeError = OtelStatusCodeKey.String("ERROR")
+)
+
+// OtelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OtelStatusDescription(val string) attribute.KeyValue {
+ return OtelStatusDescriptionKey.String(val)
+}
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that the corresponding incoming invocation would have (i.e.,
+ // this has nothing to do with the underlying transport used to make the
+ // API call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSExecutionKey is the attribute Key conforming to the "faas.execution"
+ // semantic conventions. It represents the execution ID of the current
+ // function execution.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSExecution returns an attribute KeyValue conforming to the
+// "faas.execution" semantic conventions. It represents the execution ID of the
+// current function execution.
+func FaaSExecution(val string) attribute.KeyValue {
+ return FaaSExecutionKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 this corresponds to the bucket name, and
+ // in Cosmos DB to the database name.
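+ //
+ // Editor's illustrative sketch (assumes an active span named `span`): a
+ // function triggered by an S3 upload might record
+ //
+ //     span.SetAttributes(FaaSDocumentCollection("myBucketName"),
+ //         FaaSDocumentOperationInsert)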
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It describes the type of
+ // the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to
+// the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
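+ //
+ // Editor's illustrative sketch (assumes the standard library `time`
+ // package): FaaSTime(time.Now().UTC().Format(time.RFC3339)) yields a
+ // conforming value such as '2020-01-23T13:47:06Z'.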
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. 
In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. 
It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. 
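+ //
+ // Editor's illustrative sketch (assumes an active span named `span` and
+ // the corresponding constructor helpers defined in this file):
+ //
+ //     span.SetAttributes(NetSockHostAddr("192.168.0.1"), NetSockHostPort(35555))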
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.host.port` and if `net.sock.host.addr` is set.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It describes more
+ // details regarding the connection type. It may be the type of cell
+ // technology connection, but it could be used for describing details
+ // about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe.
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. 
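For orientation while reviewing the vendored helpers above: a minimal usage sketch. The enum members (e.g. `NetTransportTCP`, `NetSockFamilyInet6`) are predeclared `attribute.KeyValue` constants, while scalar attributes get the typed constructor functions below. The semconv import path/version is an assumption (the attribute set matches a v1.17.x generation), and all values are placeholders:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // path/version assumed
	"go.opentelemetry.io/otel/trace"
)

// dialSpan annotates a client span with net.* attributes: enum members are
// used as-is, scalar attributes are built via the typed constructors.
func dialSpan(ctx context.Context) (context.Context, trace.Span) {
	ctx, span := otel.Tracer("example").Start(ctx, "dial")
	span.SetAttributes(
		semconv.NetTransportTCP,            // net.transport=ip_tcp
		semconv.NetSockFamilyInet6,         // net.sock.family=inet6
		semconv.NetPeerName("example.com"), // net.peer.name
		semconv.NetPeerPort(443),           // net.peer.port
	)
	return ctx, span
}
```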
+func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. 
+func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
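The `peer.service` and `enduser.*` helpers compose the same way; a hedged sketch with placeholder values and the same assumed import path:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // path/version assumed
)

// remoteCallAttrs identifies the remote service and the authenticated
// end user behind a request; every value here is a placeholder.
func remoteCallAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.PeerService("AuthTokenCache"),             // peer.service
		semconv.EnduserID("username"),                     // enduser.id
		semconv.EnduserRole("admin"),                      // enduser.role
		semconv.EnduserScope("read:message, write:files"), // enduser.scope
	}
}
```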
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+ // semantic conventions. It represents the kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: If `net.transport` is not specified, it can be assumed to be
+ // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
+ // assumed.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+
+ // HTTPUserAgentKey is the attribute Key conforming to the
+ // "http.user_agent" semantic conventions. It represents the value of the
+ // [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+ // HTTP/1.0
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions. It represents the value of the [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). 
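Pulling the HTTP client attributes together: a sketch of the attribute set a client instrumentation might attach for a retried request. Function and parameter names here are illustrative, the import path/version is assumed, and values are placeholders:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // path/version assumed
)

// httpClientAttrs builds the span attributes for one retried GET.
func httpClientAttrs(status, resends int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.HTTPMethod("GET"),                             // http.method (Required)
		semconv.HTTPURL("https://www.example.com/search?q=x"), // http.url, credentials stripped
		semconv.HTTPFlavorHTTP11,                              // http.flavor enum member
		semconv.HTTPStatusCode(status),                        // only if a response was received
		semconv.HTTPResendCount(resends),                      // only if the request was retried
	}
}
```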
+func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/path/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route (path template in + // the format used by the respective server framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: 'http.route' MUST NOT be populated when this is not supported by + // the HTTP server framework as the route attribute should have + // low-cardinality and the URI path can NOT substitute it. + HTTPRouteKey = attribute.Key("http.route") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in a +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). 
See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
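A sketch of the common DynamoDB request attributes in use, e.g. on a Query span; the import path is assumed and the table/index names are placeholders:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // path/version assumed
)

// dynamoQueryAttrs mirrors the request parameters of a hypothetical Query call.
func dynamoQueryAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSDynamoDBTableNames("Users"),        // aws.dynamodb.table_names (variadic)
		semconv.AWSDynamoDBIndexName("name_to_group"), // IndexName request parameter
		semconv.AWSDynamoDBConsistentRead(true),       // ConsistentRead request parameter
		semconv.AWSDynamoDBLimit(10),                  // Limit request parameter
		semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),   // Select request parameter
	}
}
```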
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
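And the Scan-specific attributes, combining request parameters with response counters; again a sketch with placeholder values and an assumed import path:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // path/version assumed
)

// dynamoScanAttrs records one parallel-scan segment and what it returned.
func dynamoScanAttrs(count, scanned int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSDynamoDBSegment(0),            // Segment request parameter
		semconv.AWSDynamoDBTotalSegments(4),      // TotalSegments request parameter
		semconv.AWSDynamoDBCount(count),          // Count response parameter
		semconv.AWSDynamoDBScannedCount(scanned), // ScannedCount response parameter
	}
}
```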
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in
+ // the `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. 
It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. It represents the + // kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. 
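The destination helpers above compose the same way; a minimal producer-side sketch, again assuming span is a trace.Span obtained elsewhere:

	span.SetAttributes(
		MessagingDestinationName("MyQueue"),
		MessagingDestinationKindQueue,
		MessagingDestinationTemporary(false),
	)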
Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. 
+ MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) 
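A consumer-side sketch combining the general attributes, the operation enum members, and the consumer helpers defined above (span is an assumed trace.Span; the values are illustrative):

	span.SetAttributes(
		MessagingSystem("kafka"),
		MessagingOperationProcess,
		MessagingBatchMessageCount(12),
		MessagingConsumerID("mygroup - client-6"),
	)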
+	// Stability: stable
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka are used for grouping alike messages to ensure
+	// they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaClientIDKey is the attribute Key conforming to the
+	// "messaging.kafka.client_id" semantic conventions. It represents the
+	// client ID for the Consumer or Producer that is handling the message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client-5'
+	MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+	// the "messaging.kafka.destination.partition" semantic conventions. It
+	// represents the partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+	// MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+	// "messaging.kafka.source.partition" semantic conventions. It represents
+	// the partition the message is received from.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions.
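A sketch of annotating a Kafka producer span with the keys above, using the helper constructors defined later in this file (span is an assumed trace.Span):

	span.SetAttributes(
		MessagingKafkaMessageKey("myKey"),
		MessagingKafkaDestinationPartition(2),
		MessagingKafkaMessageOffset(42),
	)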
It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. 
It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqClientIDKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_id" semantic conventions. It represents the
+	// unique identifier for each client.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myhost@8742@s8083jm'
+	MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delay time level is not specified.)
+	// Stability: stable
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message,
+	// which determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delivery timestamp is not specified.)
+	// Stability: stable
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+	// Stability: stable
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of message besides topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of message, another way to mark message besides message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+	return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages.
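A FIFO consumer sketch tying the RocketMQ enum members and helpers above together (span is an assumed trace.Span; the values are illustrative):

	span.SetAttributes(
		MessagingRocketmqNamespace("myNamespace"),
		MessagingRocketmqClientGroup("myConsumerGroup"),
		MessagingRocketmqMessageTypeFifo,
	)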
Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. 
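A client-side RPC sketch combining the rpc.system enum member with the helpers above (span is an assumed trace.Span; the values are illustrative):

	span.SetAttributes(
		RPCSystemGRPC,
		RPCService("myservice.EchoService"),
		RPCMethod("exampleMethod"),
	)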
It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) 
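Recording an outcome with the status-code attributes above is a single call per protocol; a sketch, assuming span is a trace.Span and using the JSON-RPC helper defined later in this file:

	// gRPC: the enum members map directly onto the numeric status codes.
	span.SetAttributes(RPCGRPCStatusCodeUnavailable)
	// JSON-RPC: the error.code of a failed response.
	span.SetAttributes(RPCJsonrpcErrorCode(-32700))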
+ // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go new file mode 100644 index 000000000..caf7249de --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/trace" +) + +// Tracer creates a named tracer that implements Tracer interface. +// If the name is an empty string then provider uses default name. +// +// This is short for GetTracerProvider().Tracer(name, opts...) +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) +} + +// GetTracerProvider returns the registered global trace provider. +// If none is registered then an instance of NoopTracerProvider is returned. 
+// +// Use the trace provider to create a named tracer. E.g. +// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// or +// +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. +func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 000000000..3aadc66cf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,334 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. +type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. 
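A sketch of the TracerConfig machinery above, using the With* TracerOptions declared later in this file; the version and schema URL are illustrative values only:

	cfg := NewTracerConfig(
		WithInstrumentationVersion("0.1.0"),
		WithSchemaURL("https://opentelemetry.io/schemas/1.17.0"),
	)
	_ = cfg.InstrumentationVersion() // "0.1.0"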
+type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. +func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. +type SpanEndOption interface { + applySpanEnd(SpanConfig) SpanConfig +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace +} + +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp +// set to the call time, otherwise no validation is performed on the returned +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig + for _, option := range options { + c = option.applyEvent(c) + } + if c.timestamp.IsZero() { + c.timestamp = time.Now() + } + return c +} + +// EventOption applies span event options to an EventConfig. 
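A sketch of how an SDK-side span-start path might consume the config constructors above (the function name is hypothetical):

	func startSketch(opts ...SpanStartOption) {
		cfg := NewSpanStartConfig(opts...)
		_ = cfg.SpanKind()   // role requested by the caller
		_ = cfg.Attributes() // initial attributes
		_ = cfg.Links()      // links recorded at start
	}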
+type EventOption interface { + applyEvent(EventConfig) EventConfig +} + +// SpanOption are options that can be used at both the beginning and end of a span. +type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption + EventOption +} + +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} + +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c SpanConfig) SpanConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} +func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o attributeOption) applyEvent(c EventConfig) EventConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} + +var _ SpanStartEventOption = attributeOption{} + +// WithAttributes adds the attributes related to a span life-cycle event. +// These attributes are used to describe the work a Span represents when this +// option is provided to a Span's start or end events. Otherwise, these +// attributes provide additional information about the event being recorded +// (e.g. error, state change, processing progress, system event). +// +// If multiple of these options are passed the attributes of each successive +// option will extend the attributes instead of overwriting. There is no +// guarantee of uniqueness in the resulting attributes. +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) +} + +// SpanEventOption are options that can be used with an event or a span. +type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). +func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} + +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. 
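A caller-side sketch combining the option constructors above; tracer is an assumed Tracer, and the attribute and time imports are elided:

	ctx, span := tracer.Start(ctx, "operation",
		WithSpanKind(SpanKindProducer),
		WithAttributes(attribute.String("peer.service", "example")),
		WithTimestamp(time.Now()),
	)
	defer span.End()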
Any
+// existing parent span context will be ignored when defining the Span's trace
+// identifiers.
+func WithNewRoot() SpanStartOption {
+	return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+		cfg.newRoot = true
+		return cfg
+	})
+}
+
+// WithSpanKind sets the SpanKind of a Span.
+func WithSpanKind(kind SpanKind) SpanStartOption {
+	return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+		cfg.spanKind = kind
+		return cfg
+	})
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) TracerOption {
+	return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+		cfg.instrumentationVersion = version
+		return cfg
+	})
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+	return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+		config.attrs = attribute.NewSet(attr...)
+		return config
+	})
+}
+
+// WithSchemaURL sets the schema URL for the Tracer.
+func WithSchemaURL(schemaURL string) TracerOption {
+	return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+		cfg.schemaURL = schemaURL
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
new file mode 100644
index 000000000..76f9a083c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "context"
+
+type traceContextKeyType int
+
+const currentSpanKey traceContextKeyType = iota
+
+// ContextWithSpan returns a copy of parent with span set as the current Span.
+func ContextWithSpan(parent context.Context, span Span) context.Context {
+	return context.WithValue(parent, currentSpanKey, span)
+}
+
+// ContextWithSpanContext returns a copy of parent with sc as the current
+// Span. The Span implementation that wraps sc is non-recording and performs
+// no operations other than to return sc as the SpanContext from the
+// SpanContext method.
+func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context {
+	return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
+}
+
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set
+// explicitly as a remote SpanContext and as the current Span. The Span
+// implementation that wraps rsc is non-recording and performs no operations
+// other than to return rsc as the SpanContext from the SpanContext method.
+func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context {
+	return ContextWithSpanContext(parent, rsc.WithRemote(true))
+}
+
+// SpanFromContext returns the current Span from ctx.
+//
+// If no Span is currently set in ctx an implementation of a Span that
+// performs no operations is returned.
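A propagation sketch tying the context helpers above together; remoteSC is an assumed SpanContext extracted from an incoming request:

	ctx = ContextWithRemoteSpanContext(ctx, remoteSC)
	// ...later, anywhere downstream:
	span := SpanFromContext(ctx)
	_ = span.SpanContext().TraceID()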
+func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpan{} + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpan{} +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 000000000..440f3d756 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,130 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.TracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. 
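A small sketch, assuming only the API in context.go above, of the round-trip it defines: a SpanContext stored with ContextWithSpanContext comes back from SpanFromContext wrapped in a non-recording Span. The package and function names are illustrative.

package example

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func roundTrip(sc trace.SpanContext) {
	// Storing a bare SpanContext wraps it in a non-recording Span.
	ctx := trace.ContextWithSpanContext(context.Background(), sc)

	span := trace.SpanFromContext(ctx)
	fmt.Println(span.IsRecording()) // false: the wrapper records nothing

	// SpanContextFromContext is shorthand for the lookup above.
	fmt.Println(trace.SpanContextFromContext(ctx).Equal(sc)) // true
}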
If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/trace/embedded"
+
+	type TracerProvider struct {
+		embedded.TracerProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to panic, they
+can embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/trace"
+
+	type TracerProvider struct {
+		trace.TracerProvider
+		// ...
+	}
+
+This option is not recommended. It will lead to publishing packages that
+contain runtime panics when users update to newer versions of
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
+dependency.
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/trace/noop]:
+
+	import "go.opentelemetry.io/otel/trace/noop"
+
+	type TracerProvider struct {
+		noop.TracerProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+*/
+package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
new file mode 100644
index 000000000..898db5a75
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// trace API].
+//
+// Implementers of the [OpenTelemetry trace API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry trace API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
+package embedded // import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider is embedded in
+// [go.opentelemetry.io/otel/trace.TracerProvider].
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 000000000..88fcb8161 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 000000000..c125491ca --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,93 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// NewNoopTracerProvider returns an implementation of TracerProvider that +// performs no operations. The Tracer and Spans created from the returned +// TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. +func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} +} + +type noopTracerProvider struct{ embedded.TracerProvider } + +var _ TracerProvider = noopTracerProvider{} + +// Tracer returns noop implementation of Tracer. +func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} +} + +// noopTracer is an implementation of Tracer that performs no operations. +type noopTracer struct{ embedded.Tracer } + +var _ Tracer = noopTracer{} + +// Start carries forward a non-recording Span, if one is present in the context, otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpan{} + } + return ContextWithSpan(ctx, span), span +} + +// noopSpan is an implementation of Span that performs no operations. +type noopSpan struct{ embedded.Span } + +var _ Span = noopSpan{} + +// SpanContext returns an empty span context. +func (noopSpan) SpanContext() SpanContext { return SpanContext{} } + +// IsRecording always returns false. +func (noopSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (noopSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (noopSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (noopSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (noopSpan) End(...SpanEndOption) {} + +// RecordError does nothing. +func (noopSpan) RecordError(error, ...EventOption) {} + +// AddEvent does nothing. +func (noopSpan) AddEvent(string, ...EventOption) {} + +// SetName does nothing. +func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider. +func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go new file mode 100644 index 000000000..26a4b2260 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -0,0 +1,577 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
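A short illustrative sketch, not from the diff, of the carry-forward behavior that noopTracer.Start documents above: a non-recording span already in the context survives a noop Start, so a remote SpanContext is not lost. Names are illustrative.

package example

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func noopPropagation(remote trace.SpanContext) {
	ctx := trace.ContextWithRemoteSpanContext(context.Background(), remote)

	tracer := trace.NewNoopTracerProvider().Tracer("example")
	_, span := tracer.Start(ctx, "op")

	// The non-recording span in ctx is carried forward, so the remote
	// SpanContext is preserved rather than replaced by an empty one.
	fmt.Println(span.SpanContext().Equal(remote.WithRemote(true))) // true
}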
+ +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +const ( + // FlagsSampled is a bitmask with the sampled bit set. A SpanContext + // with the sampling bit set means the span is sampled. + FlagsSampled = TraceFlags(0x01) + + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" + + errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" + errNilTraceID errorConst = "trace-id can't be all zero" + + errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" + errNilSpanID errorConst = "span-id can't be all zero" +) + +type errorConst string + +func (e errorConst) Error() string { + return string(e) +} + +// TraceID is a unique identity of a trace. +// nolint:revive // revive complains about stutter of `trace.TraceID`. +type TraceID [16]byte + +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) + +// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// not consist of zeros only. +func (t TraceID) IsValid() bool { + return !bytes.Equal(t[:], nilTraceID[:]) +} + +// MarshalJSON implements a custom marshal function to encode TraceID +// as a hex string. +func (t TraceID) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String returns the hex string representation form of a TraceID. +func (t TraceID) String() string { + return hex.EncodeToString(t[:]) +} + +// SpanID is a unique identity of a span in a trace. +type SpanID [8]byte + +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) + +// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// of zeros only. +func (s SpanID) IsValid() bool { + return !bytes.Equal(s[:], nilSpanID[:]) +} + +// MarshalJSON implements a custom marshal function to encode SpanID +// as a hex string. +func (s SpanID) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// String returns the hex string representation form of a SpanID. +func (s SpanID) String() string { + return hex.EncodeToString(s[:]) +} + +// TraceIDFromHex returns a TraceID from a hex string if it is compliant with +// the W3C trace-context specification. See more at +// https://www.w3.org/TR/trace-context/#trace-id +// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. +func TraceIDFromHex(h string) (TraceID, error) { + t := TraceID{} + if len(h) != 32 { + return t, errInvalidTraceIDLength + } + + if err := decodeHex(h, t[:]); err != nil { + return t, err + } + + if !t.IsValid() { + return t, errNilTraceID + } + return t, nil +} + +// SpanIDFromHex returns a SpanID from a hex string if it is compliant +// with the w3c trace-context specification. 
+// See more at https://www.w3.org/TR/trace-context/#parent-id +func SpanIDFromHex(h string) (SpanID, error) { + s := SpanID{} + if len(h) != 16 { + return s, errInvalidSpanIDLength + } + + if err := decodeHex(h, s[:]); err != nil { + return s, err + } + + if !s.IsValid() { + return s, errNilSpanID + } + return s, nil +} + +func decodeHex(h string, b []byte) error { + for _, r := range h { + switch { + case 'a' <= r && r <= 'f': + continue + case '0' <= r && r <= '9': + continue + default: + return errInvalidHexID + } + } + + decoded, err := hex.DecodeString(h) + if err != nil { + return err + } + + copy(b, decoded) + return nil +} + +// TraceFlags contains flags that can be set on a SpanContext. +type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. + +// IsSampled returns if the sampling bit is set in the TraceFlags. +func (tf TraceFlags) IsSampled() bool { + return tf&FlagsSampled == FlagsSampled +} + +// WithSampled sets the sampling bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. + if sampled { + return tf | FlagsSampled + } + + return tf &^ FlagsSampled +} + +// MarshalJSON implements a custom marshal function to encode TraceFlags +// as a hex string. +func (tf TraceFlags) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.String()) +} + +// String returns the hex string representation form of TraceFlags. +func (tf TraceFlags) String() string { + return hex.EncodeToString([]byte{byte(tf)}[:]) +} + +// SpanContextConfig contains mutable fields usable for constructing +// an immutable SpanContext. +type SpanContextConfig struct { + TraceID TraceID + SpanID SpanID + TraceFlags TraceFlags + TraceState TraceState + Remote bool +} + +// NewSpanContext constructs a SpanContext using values from the provided +// SpanContextConfig. +func NewSpanContext(config SpanContextConfig) SpanContext { + return SpanContext{ + traceID: config.TraceID, + spanID: config.SpanID, + traceFlags: config.TraceFlags, + traceState: config.TraceState, + remote: config.Remote, + } +} + +// SpanContext contains identifying trace information about a Span. +type SpanContext struct { + traceID TraceID + spanID SpanID + traceFlags TraceFlags + traceState TraceState + remote bool +} + +var _ json.Marshaler = SpanContext{} + +// IsValid returns if the SpanContext is valid. A valid span context has a +// valid TraceID and SpanID. +func (sc SpanContext) IsValid() bool { + return sc.HasTraceID() && sc.HasSpanID() +} + +// IsRemote indicates whether the SpanContext represents a remotely-created Span. +func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. +func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + +// HasTraceID checks if the SpanContext has a valid TraceID. +func (sc SpanContext) HasTraceID() bool { + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. 
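An illustrative sketch of the ID parsing and immutable-copy API above; the hex strings are arbitrary valid examples and the function name is made up.

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func buildSpanContext() (trace.SpanContext, error) {
	// Both parsers reject wrong lengths, non-hex input, and all-zero IDs.
	tid, err := trace.TraceIDFromHex("0af7651916cd43dd8448eb211c80319c")
	if err != nil {
		return trace.SpanContext{}, err
	}
	sid, err := trace.SpanIDFromHex("b7ad6b7169203331")
	if err != nil {
		return trace.SpanContext{}, err
	}

	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    tid,
		SpanID:     sid,
		TraceFlags: trace.FlagsSampled,
	})
	// With* methods return modified copies; sc itself never changes.
	fmt.Println(sc.IsValid(), sc.IsSampled(), sc.WithRemote(true).IsRemote())
	return sc, nil
}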
+func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID +} + +// HasSpanID checks if the SpanContext has a valid SpanID. +func (sc SpanContext) HasSpanID() bool { + return sc.spanID.IsValid() +} + +// WithSpanID returns a new SpanContext with the SpanID replaced. +func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags +} + +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsSampled() bool { + return sc.traceFlags.IsSampled() +} + +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState +} + +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, + } +} + +// Equal is a predicate that determines whether two SpanContext values are equal. +func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote +} + +// MarshalJSON implements a custom marshal function to encode a SpanContext. +func (sc SpanContext) MarshalJSON() ([]byte, error) { + return json.Marshal(SpanContextConfig{ + TraceID: sc.traceID, + SpanID: sc.spanID, + TraceFlags: sc.traceFlags, + TraceState: sc.traceState, + Remote: sc.remote, + }) +} + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. 
+	AddEvent(name string, options ...EventOption)
+
+	// IsRecording returns the recording state of the Span. It will return
+	// true if the Span is active and events can be recorded.
+	IsRecording() bool
+
+	// RecordError will record err as an exception span event for this span. An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+ SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. 
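A tiny sketch of the SpanKind helpers above; the normalizeKind wrapper is hypothetical.

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func normalizeKind(k trace.SpanKind) trace.SpanKind {
	k = trace.ValidateSpanKind(k) // SpanKindUnspecified coerces to SpanKindInternal
	fmt.Println(k.String())       // "internal", "server", "client", ...
	return k
}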
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go new file mode 100644 index 000000000..db936ba5b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -0,0 +1,331 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
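A brief sketch of the Start contract just described, assuming a Tracer obtained from any provider; the function name is made up.

package example

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

func parentChild(ctx context.Context, tracer trace.Tracer) {
	ctx, parent := tracer.Start(ctx, "parent")
	defer parent.End() // every started Span must be ended

	// The child picks up the parent from ctx; passing trace.WithNewRoot()
	// would start a new trace instead.
	_, child := tracer.Start(ctx, "child")
	defer child.End()
}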
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "encoding/json" + "fmt" + "strings" +) + +const ( + maxListMembers = 32 + + listDelimiters = "," + memberDelimiter = "=" + + errInvalidKey errorConst = "invalid tracestate key" + errInvalidValue errorConst = "invalid tracestate value" + errInvalidMember errorConst = "invalid tracestate list-member" + errMemberNumber errorConst = "too many list-members in tracestate" + errDuplicate errorConst = "duplicate list-member in tracestate" +) + +type member struct { + Key string + Value string +} + +// according to (chr = %x20 / (nblk-char = %x21-2B / %x2D-3C / %x3E-7E) ) +// means (chr = %x20-2B / %x2D-3C / %x3E-7E) . +func checkValueChar(v byte) bool { + return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) . +func checkValueLast(v byte) bool { + return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// based on the W3C Trace Context specification +// +// value = (0*255(chr)) nblk-chr +// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E +// chr = %x20 / nblk-chr +// +// see https://www.w3.org/TR/trace-context-1/#value +func checkValue(val string) bool { + n := len(val) + if n == 0 || n > 256 { + return false + } + for i := 0; i < n-1; i++ { + if !checkValueChar(val[i]) { + return false + } + } + return checkValueLast(val[n-1]) +} + +func checkKeyRemain(key string) bool { + // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) + for _, v := range key { + if isAlphaNum(byte(v)) { + continue + } + switch v { + case '_', '-', '*', '/': + continue + } + return false + } + return true +} + +// according to +// +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// +// param n is remain part length, should be 255 in simple-key or 13 in system-id. +func checkKeyPart(key string, n int) bool { + if len(key) == 0 { + return false + } + first := key[0] // key's first char + ret := len(key[1:]) <= n + ret = ret && first >= 'a' && first <= 'z' + return ret && checkKeyRemain(key[1:]) +} + +func isAlphaNum(c byte) bool { + if c >= 'a' && c <= 'z' { + return true + } + return c >= '0' && c <= '9' +} + +// according to +// +// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) +// +// param n is remain part length, should be 240 exactly. +func checkKeyTenant(key string, n int) bool { + if len(key) == 0 { + return false + } + return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) +} + +// based on the W3C Trace Context specification +// +// key = simple-key / multi-tenant-key +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// multi-tenant-key = tenant-id "@" system-id +// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// lcalpha = %x61-7A ; a-z +// +// see https://www.w3.org/TR/trace-context-1/#tracestate-header. 
+func checkKey(key string) bool { + tenant, system, ok := strings.Cut(key, "@") + if !ok { + return checkKeyPart(key, 255) + } + return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13) +} + +func newMember(key, value string) (member, error) { + if !checkKey(key) { + return member{}, errInvalidKey + } + if !checkValue(value) { + return member{}, errInvalidValue + } + return member{Key: key, Value: value}, nil +} + +func parseMember(m string) (member, error) { + key, val, ok := strings.Cut(m, memberDelimiter) + if !ok { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + key = strings.TrimLeft(key, " \t") + val = strings.TrimRight(val, " \t") + result, e := newMember(key, val) + if e != nil { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + return result, nil +} + +// String encodes member into a string compliant with the W3C Trace Context +// specification. +func (m member) String() string { + return m.Key + "=" + m.Value +} + +// TraceState provides additional vendor-specific trace identification +// information across different distributed tracing systems. It represents an +// immutable list consisting of key/value pairs, each pair is referred to as a +// list-member. +// +// TraceState conforms to the W3C Trace Context specification +// (https://www.w3.org/TR/trace-context-1). All operations that create or copy +// a TraceState do so by validating all input and will only produce TraceState +// that conform to the specification. Specifically, this means that all +// list-member's key/value pairs are valid, no duplicate list-members exist, +// and the maximum number of list-members (32) is not exceeded. +type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` + // list is the members in order. + list []member +} + +var _ json.Marshaler = TraceState{} + +// ParseTraceState attempts to decode a TraceState from the passed +// string. It returns an error if the input is invalid according to the W3C +// Trace Context specification. +func ParseTraceState(ts string) (TraceState, error) { + if ts == "" { + return TraceState{}, nil + } + + wrapErr := func(err error) error { + return fmt.Errorf("failed to parse tracestate: %w", err) + } + + var members []member + found := make(map[string]struct{}) + for ts != "" { + var memberStr string + memberStr, ts, _ = strings.Cut(ts, listDelimiters) + if len(memberStr) == 0 { + continue + } + + m, err := parseMember(memberStr) + if err != nil { + return TraceState{}, wrapErr(err) + } + + if _, ok := found[m.Key]; ok { + return TraceState{}, wrapErr(errDuplicate) + } + found[m.Key] = struct{}{} + + members = append(members, m) + if n := len(members); n > maxListMembers { + return TraceState{}, wrapErr(errMemberNumber) + } + } + + return TraceState{list: members}, nil +} + +// MarshalJSON marshals the TraceState into JSON. +func (ts TraceState) MarshalJSON() ([]byte, error) { + return json.Marshal(ts.String()) +} + +// String encodes the TraceState into a string compliant with the W3C +// Trace Context specification. The returned string will be invalid if the +// TraceState contains any invalid members. 
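An illustrative sketch of these validation rules seen through ParseTraceState; the keys and values are samples only.

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func parseStates() {
	// A simple-key member and a multi-tenant (tenant@system) member.
	_, err := trace.ParseTraceState("key1=value1,tenant@system=value2")
	fmt.Println(err == nil) // true

	// Duplicate keys are rejected, as are >32 members and bad characters.
	_, err = trace.ParseTraceState("key1=a,key1=b")
	fmt.Println(err != nil) // true
}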
+func (ts TraceState) String() string { + if len(ts.list) == 0 { + return "" + } + var n int + n += len(ts.list) // member delimiters: '=' + n += len(ts.list) - 1 // list delimiters: ',' + for _, mem := range ts.list { + n += len(mem.Key) + n += len(mem.Value) + } + + var sb strings.Builder + sb.Grow(n) + _, _ = sb.WriteString(ts.list[0].Key) + _ = sb.WriteByte('=') + _, _ = sb.WriteString(ts.list[0].Value) + for i := 1; i < len(ts.list); i++ { + _ = sb.WriteByte(listDelimiters[0]) + _, _ = sb.WriteString(ts.list[i].Key) + _ = sb.WriteByte('=') + _, _ = sb.WriteString(ts.list[i].Value) + } + return sb.String() +} + +// Get returns the value paired with key from the corresponding TraceState +// list-member if it exists, otherwise an empty string is returned. +func (ts TraceState) Get(key string) string { + for _, member := range ts.list { + if member.Key == key { + return member.Value + } + } + + return "" +} + +// Insert adds a new list-member defined by the key/value pair to the +// TraceState. If a list-member already exists for the given key, that +// list-member's value is updated. The new or updated list-member is always +// moved to the beginning of the TraceState as specified by the W3C Trace +// Context specification. +// +// If key or value are invalid according to the W3C Trace Context +// specification an error is returned with the original TraceState. +// +// If adding a new list-member means the TraceState would have more members +// then is allowed, the new list-member will be inserted and the right-most +// list-member will be dropped in the returned TraceState. +func (ts TraceState) Insert(key, value string) (TraceState, error) { + m, err := newMember(key, value) + if err != nil { + return ts, err + } + n := len(ts.list) + found := n + for i := range ts.list { + if ts.list[i].Key == key { + found = i + } + } + cTS := TraceState{} + if found == n && n < maxListMembers { + cTS.list = make([]member, n+1) + } else { + cTS.list = make([]member, n) + } + cTS.list[0] = m + // When the number of members exceeds capacity, drop the "right-most". + copy(cTS.list[1:], ts.list[0:found]) + if found < n { + copy(cTS.list[1+found:], ts.list[found+1:]) + } + return cTS, nil +} + +// Delete returns a copy of the TraceState with the list-member identified by +// key removed. +func (ts TraceState) Delete(key string) TraceState { + members := make([]member, ts.Len()) + copy(members, ts.list) + for i, member := range ts.list { + if member.Key == key { + members = append(members[:i], members[i+1:]...) + // TraceState should contain no duplicate members. + break + } + } + return TraceState{list: members} +} + +// Len returns the number of list-members in the TraceState. +func (ts TraceState) Len() int { + return len(ts.list) +} diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh new file mode 100644 index 000000000..dbb61a422 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
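A short sketch of the immutable TraceState update API above; the vendor key/value pair is made up.

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func updateState(ts trace.TraceState) {
	ts2, err := ts.Insert("vendor", "x1")
	if err != nil {
		return // the key or value failed W3C validation
	}
	fmt.Println(ts2.Get("vendor"))                  // "x1", now the left-most member
	fmt.Println(ts2.Delete("vendor").Get("vendor")) // "" after removal
}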
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +cd $(dirname $0) +TOOLS_DIR=$(pwd)/.tools + +if [ -z "${GOPATH}" ] ; then + printf "GOPATH is not defined.\n" + exit -1 +fi + +if [ ! -d "${GOPATH}" ] ; then + printf "GOPATH ${GOPATH} is invalid \n" + exit -1 +fi + +# Pre-requisites +if ! git diff --quiet; then \ + git status + printf "\n\nError: working tree is not clean\n" + exit -1 +fi + +if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then + printf "$(git log -1)" + printf "\n\nError: HEAD is not pointing to a tagged version" +fi + +make ${TOOLS_DIR}/gojq + +DIR_TMP="${GOPATH}/src/oteltmp/" +rm -rf $DIR_TMP +mkdir -p $DIR_TMP + +printf "Copy examples to ${DIR_TMP}\n" +cp -a ./example ${DIR_TMP} + +# Update go.mod files +printf "Update go.mod: rename module and remove replace\n" + +PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) + +for dir in $PACKAGE_DIRS; do + printf " Update go.mod for $dir\n" + (cd "${DIR_TMP}/${dir}" && \ + # replaces is ("mod1" "mod2" …) + replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ + # strip double quotes + replaces=("${replaces[@]%\"}") && \ + replaces=("${replaces[@]#\"}") && \ + # make an array (-dropreplace=mod1 -dropreplace=mod2 …) + dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ + go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ + go mod tidy) +done +printf "Update done:\n\n" + +# Build directories that contain main package. These directories are different than +# directories that contain go.mod files. +printf "Build examples:\n" +EXAMPLES=$(./get_main_pkgs.sh ./example) +for ex in $EXAMPLES; do + printf " Build $ex in ${DIR_TMP}/${ex}\n" + (cd "${DIR_TMP}/${ex}" && \ + go build .) +done + +# Cleanup +printf "Remove copied files.\n" +rm -rf $DIR_TMP diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/go.opentelemetry.io/otel/version.go similarity index 67% rename from vendor/github.com/go-openapi/swag/pre_go18.go rename to vendor/go.opentelemetry.io/otel/version.go index 2757d9b95..c7aba1c3f 100644 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -1,10 +1,10 @@ -// Copyright 2015 go-swagger maintainers +// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,13 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !go1.8 -// +build !go1.8 +package otel // import "go.opentelemetry.io/otel" -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) +// Version is the current release version of OpenTelemetry in use. 
+func Version() string { + return "1.22.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 000000000..a9cfb80ae --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,52 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module-sets: + stable-v1: + version: v1.22.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/dice + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk + - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/trace + experimental-metrics: + version: v0.45.0 + modules: + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/prometheus + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + experimental-schema: + version: v0.0.7 + modules: + - go.opentelemetry.io/otel/schema +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5deeb..b3c1699bf 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, 
R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go index 4269ed113..bf2259537 100644 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -279,21 +279,22 @@ func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { // This is the exposed reflection of the internal OCSP structures. -// The status values that can be expressed in OCSP. See RFC 6960. +// The status values that can be expressed in OCSP. See RFC 6960. +// These are used for the Response.Status field. const ( // Good means that the certificate is valid. - Good = iota + Good = 0 // Revoked means that the certificate has been deliberately revoked. - Revoked + Revoked = 1 // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown + Unknown = 2 // ServerFailed is unused and was never used (see // https://go-review.googlesource.com/#/c/18944). ParseResponse will // return a ResponseError when an error response is parsed. - ServerFailed + ServerFailed = 3 ) -// The enumerated reasons for revoking a certificate. See RFC 5280. +// The enumerated reasons for revoking a certificate. See RFC 5280. const ( Unspecified = 0 KeyCompromise = 1 diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 5e8158bba..46ceac343 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -209,25 +209,37 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { return s } +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } +} + // Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check + _ = s[i:j:len(s)] // bounds check - return append(s[:i], s[j:]...) + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s } // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. -// When DeleteFunc removes m elements, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage -// collected. +// DeleteFunc zeroes the elements between the new length and the original length. 
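To make the new zeroing behavior of Delete concrete, a small sketch (not part of the change) using the vendored package; the function name is illustrative.

package example

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func deleteZeroes() {
	a, b, c := "a", "b", "c"
	s := []*string{&a, &b, &c}

	s = slices.Delete(s, 0, 2) // keep only &c
	fmt.Println(len(s), *s[0]) // 1 c

	// The obsolete tail of the backing array is now zeroed, so the
	// removed pointers no longer keep their targets alive.
	tail := s[1:3]
	fmt.Println(tail[0] == nil, tail[1] == nil) // true true
}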
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i := IndexFunc(s, del) if i == -1 { @@ -240,11 +252,13 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { _ = s[i:j] // verify that i:j is a valid subslice @@ -272,6 +286,7 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { if i+len(v) != j { copy(r[i+len(v):], s[j:]) } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC return r } @@ -345,9 +360,7 @@ func Clone[S ~[]E, E any](s S) S { // This is like the uniq command found on Unix. // Compact modifies the contents of the slice s and returns the modified slice, // which may have a smaller length. -// When Compact discards m elements in total, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage collected. +// Compact zeroes the elements between the new length and the original length. func Compact[S ~[]E, E comparable](s S) S { if len(s) < 2 { return s @@ -361,11 +374,13 @@ func Compact[S ~[]E, E comparable](s S) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // CompactFunc is like [Compact] but uses an equality function to compare elements. // For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { if len(s) < 2 { return s @@ -379,6 +394,7 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index de67f938a..3c57880d6 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() { return } switch c { - case ' ', '\n', '\r', '\t', '\f', '/': - z.pendingAttr[0].end = z.raw.end - 1 - return case '=': if z.pendingAttr[0].start+1 == z.raw.end { // WHATWG 13.2.5.32, if we see an equals sign before the attribute name @@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() { continue } fallthrough - case '>': + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // WHATWG 13.2.5.33 Attribute name state + // We need to reconsume the char in the after attribute name state to support the / character z.raw.end-- z.pendingAttr[0].end = z.raw.end return @@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() { if z.err != nil { return } + if c == '/' { + // WHATWG 13.2.5.34 After attribute name state + // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. 
+ return + } if c != '=' { z.raw.end-- return diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90dc..e2b298d85 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86c..c2a5b44b3 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -2911,6 +2911,15 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } cc.cond.Broadcast() diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4bd..b0e419857 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c6492020e..fdcaa974d 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -584,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc69937..2f0fa76e4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
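The maxHeaderStringLen change above relies on a small overflow guard; here is an isolated sketch of the same pattern with a hypothetical intLimit helper.

package example

import "fmt"

// intLimit converts an untrusted uint32 to int and treats a negative
// result (the 32-bit overflow case, where int is 32 bits wide) as
// "no limit" (0), mirroring the frame.go change.
func intLimit(v uint32) int {
	n := int(v)
	if n < 0 {
		return 0
	}
	return n
}

func demo() {
	fmt.Println(intLimit(1 << 20)) // 1048576 on all platforms
}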
-//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4db..2b57e0f73 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e6..5682e2628 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) + } + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is the parameter value to set.
+func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// path is a non-empty path for the specified key. +// atfd is a file descriptor at which to start the lookup, or AT_FDCWD. +func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FsconfigSetPath but with AT_PATH_EMPTY implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key is the parameter key to set. +// value is a file descriptor to be assigned to the specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon.
+func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index a5d3ff8df..36bf8399f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2465,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2669,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2913,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' 
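The Fsconfig* wrappers above complete x/sys/unix's coverage of the kernel's new mount API: open a filesystem context with fsopen(2), configure it with fsconfig(2), then materialize and attach it. A hedged end-to-end sketch; the tmpfs parameters and target path are illustrative, and the calls require CAP_SYS_ADMIN:

package main

import "golang.org/x/sys/unix"

func mountTmpfs(target string) error {
	// fsopen(2): create a filesystem context.
	fsfd, err := unix.Fsopen("tmpfs", unix.FSOPEN_CLOEXEC)
	if err != nil {
		return err
	}
	defer unix.Close(fsfd)
	// fsconfig(2): set parameters, then create the superblock.
	if err := unix.FsconfigSetString(fsfd, "size", "16m"); err != nil {
		return err
	}
	if err := unix.FsconfigCreate(fsfd); err != nil {
		return err
	}
	// fsmount(2) + move_mount(2): turn the context into a mount and attach it.
	mfd, err := unix.Fsmount(fsfd, unix.FSMOUNT_CLOEXEC, 0)
	if err != nil {
		return err
	}
	defer unix.Close(mfd)
	return unix.MoveMount(mfd, "", unix.AT_FDCWD, target, unix.MOVE_MOUNT_F_EMPTY_PATH)
}

func main() {
	if err := mountTmpfs("/mnt/scratch"); err != nil {
		panic(err)
	}
}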
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3075,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821cf..42ff8c3c1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e4112..dca436004 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c63985560..5cca668ac 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e25..d8cae6d15 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 27ac4a09e..28e39afdc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC 
= 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642a..cd66e92cb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d75..c1595eba7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0d..ee9456b0d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84f..8cfca81e1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c54f152d6..60b0deb3a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc72..f90aa7281 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e0c3725e2..ba9e01503 100644 
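The per-architecture SECCOMP_IOCTL_NOTIF_* tables in these files differ only in the leading nibble (0x40… on x86-like ABIs, 0x80… on MIPS, PowerPC, and SPARC) because the classic big-endian ABIs encode the ioctl direction bits differently. A worked encoding for SECCOMP_IOCTL_NOTIF_ID_VALID under the x86 scheme:

package main

import "fmt"

func main() {
	// Linux _IOC layout on x86: dir(2 bits)<<30 | size(14 bits)<<16 | type<<8 | nr.
	const (
		iocWrite = 1   // _IOC_WRITE; MIPS/PPC/SPARC use 4 in a 3-bit field at bit 29
		magic    = '!' // SECCOMP_IOC_MAGIC, 0x21
		nr       = 2   // the ID_VALID request number
		size     = 8   // sizeof(__u64)
	)
	fmt.Printf("%#x\n", iocWrite<<30|size<<16|magic<<8|nr) // 0x40082102
	// Same request on the big-endian ABIs: 4<<29 | 8<<16 | 0x21<<8 | 2 = 0x80082102.
}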
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813ed..07cdfd6e9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4ec..2f1dd214a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994da..f40519d90 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d2712..87d8612a1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbdd..0cc3ce496 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f56dc2504..856d92d69 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 974bf2467..8d467094c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e2..edc173244 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77e..445eba206 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362ef..adba01bca 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4fd..014c4e9c7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca81..ccc97d74d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index a72e31d39..ec2b64a95 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + 
SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e3747..21a839e33 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c8..c11121ec3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e591..909b631fc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195a..e49bed16e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943bc..66017d2d3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc51..47bab18dc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399ff..eff6bcdef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - 
Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -832,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1546,6 +1559,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1561,6 +1575,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1608,6 +1623,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1645,6 +1663,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + 
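Besides the gofmt realignment, TCPInfo gains three fields (Total_rto, Total_rto_recoveries, Total_rto_time, populated by kernels from 6.7 on), which is why SizeofTCPInfo grows from 0xf0 to 0xf8. A hedged sketch of reading them from a live connection; the endpoint is illustrative:

package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	raw, err := conn.(*net.TCPConn).SyscallConn()
	if err != nil {
		panic(err)
	}
	_ = raw.Control(func(fd uintptr) {
		info, err := unix.GetsockoptTCPInfo(int(fd), unix.IPPROTO_TCP, unix.TCP_INFO)
		if err != nil {
			return
		}
		// Total_rto_recoveries reads as 0 on pre-6.7 kernels.
		fmt.Printf("rtt=%dus rto_recoveries=%d\n", info.Rtt, info.Total_rto_recoveries)
	})
}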
IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1666,6 +1692,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1689,9 +1718,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1722,6 +1764,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1736,6 +1780,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1748,6 +1793,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1777,6 +1824,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1792,6 +1842,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1850,8 +1901,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1881,6 +1940,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( @@ -3399,7 +3463,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4247,8 @@ const ( ) type LandlockRulesetAttr struct 
{ - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -5134,7 +5199,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 +5612,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad19250..d4577a423 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block = (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index ffb8708cc..6395a031d 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/time/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS b/vendor/golang.org/x/time/PATENTS similarity index 100% rename from vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS rename to vendor/golang.org/x/time/PATENTS diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go new file mode 100644 index 000000000..f0e0cf3cb --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate.go @@ -0,0 +1,428 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "context" + "fmt" + "math" + "sync" + "time" +) + +// Limit defines the maximum frequency of some events. +// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. +// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. +// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +type Limiter struct { + mu sync.Mutex + limit Limit + burst int + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. 
Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.burst +} + +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow reports whether an event may happen now. +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time t. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(math.MaxInt64) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(t time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(t) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). +func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. +func (r *Reservation) CancelAt(t time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. 
+ restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + t, tokens := r.lim.advance(t) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = t + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(t) { + r.lim.lastEvent = prevEvent + } + } +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. +// Usage example: +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. +// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. +func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { + lim.mu.Lock() + burst := lim.burst + limit := lim.limit + lim.mu.Unlock() + + if n > burst && limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(t) + } + // Reserve + r := lim.reserveN(t, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait if necessary + delay := r.DelayFrom(t) + if delay == 0 { + return nil + } + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing + select { + case <-ch: + // We can proceed. + return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. 
+ r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + t, tokens := lim.advance(t) + + lim.last = t + lim.tokens = tokens + lim.limit = newLimit +} + +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + t, tokens := lim.advance(t) + + lim.last = t + lim.tokens = tokens + lim.burst = newBurst +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + defer lim.mu.Unlock() + + if lim.limit == Inf { + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: t, + } + } else if lim.limit == 0 { + var ok bool + if lim.burst >= n { + ok = true + lim.burst -= n + } + return Reservation{ + ok: ok, + lim: lim, + tokens: lim.burst, + timeToAct: t, + } + } + + t, tokens := lim.advance(t) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = t.Add(waitDuration) + + // Update state + lim.last = t + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } + + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. +// advance requires that lim.mu is held. +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { + last := lim.last + if t.Before(last) { + last = t + } + + // Calculate the new number of tokens, due to time that passed. + elapsed := t.Sub(last) + delta := lim.limit.tokensFromDuration(elapsed) + tokens := lim.tokens + delta + if burst := float64(lim.burst); tokens > burst { + tokens = burst + } + return t, tokens +} + +// durationFromTokens is a unit conversion function from the number of tokens to the duration +// of time it takes to accumulate them at a rate of limit tokens per second. +func (limit Limit) durationFromTokens(tokens float64) time.Duration { + if limit <= 0 { + return InfDuration + } + seconds := tokens / float64(limit) + return time.Duration(float64(time.Second) * seconds) +} + +// tokensFromDuration is a unit conversion function from a time duration to the number of tokens +// which could be accumulated during that duration at a rate of limit tokens per second. 
+func (limit Limit) tokensFromDuration(d time.Duration) float64 { + if limit <= 0 { + return 0 + } + return d.Seconds() * float64(limit) +} diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 000000000..6ba99ddb6 --- /dev/null +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. +// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. + + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. +// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. +func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index b2a0b7c6a..a8d7b06ac 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -15,22 +15,10 @@ Load passes most patterns directly to the underlying build tool. The default build tool is the go command. Its supported patterns are described at https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. -Load may be used in Go projects that use alternative build systems, by -installing an appropriate "driver" program for the build system and -specifying its location in the GOPACKAGESDRIVER environment variable. -For example, -https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration -explains how to use the driver for Bazel. 
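Stepping back to the golang.org/x/time/rate package vendored above: it is the stock token-bucket limiter, newly pulled in by the dependency bump. A minimal usage sketch of the Wait path, which most callers should prefer:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 10 events per second with bursts of up to 5 tokens.
	lim := rate.NewLimiter(rate.Every(100*time.Millisecond), 5)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	for i := 0; i < 10; i++ {
		// Wait blocks until a token is available or ctx is done.
		if err := lim.Wait(ctx); err != nil {
			fmt.Println("rate:", err)
			return
		}
		fmt.Println("event", i)
	}
}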
-The driver program is responsible for interpreting patterns in its -preferred notation and reporting information about the packages that -they identify. -(See driverRequest and driverResponse types for the JSON -schema used by the protocol. -Though the protocol is supported, these types are currently unexported; -see #64608 for a proposal to publish them.) - -Regardless of driver, all patterns with the prefix "query=", where query is a +All patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -86,7 +74,29 @@ for details. Most tools should pass their command-line arguments (after any flags) uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. + See the Example function for typical usage. + +# The driver protocol + +[Load] may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7db1d1293..4335c1eb1 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file enables an external tool to intercept package requests. -// If the tool is present then its results are used in preference to -// the go list command. - package packages +// This file defines the protocol that enables an external "driver" +// tool to supply package metadata in place of 'go list'. + import ( "bytes" "encoding/json" @@ -17,31 +16,71 @@ import ( "strings" ) -// The Driver Protocol +// DriverRequest defines the schema of a request for package metadata +// from an external driver program. The JSON-encoded DriverRequest +// message is provided to the driver program's standard input. The +// query patterns are provided as command-line arguments. // -// The driver, given the inputs to a call to Load, returns metadata about the packages specified. -// This allows for different build systems to support go/packages by telling go/packages how the -// packages' source is organized. -// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in -// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package -// documentation in doc.go for the full description of the patterns that need to be supported. 
-// A driver receives as a JSON-serialized driverRequest struct in standard input and will -// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. - -// driverRequest is used to provide the portion of Load's Config that is needed by a driver. -type driverRequest struct { +// See the package documentation for an overview. +type DriverRequest struct { Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents // of overlay files. Overlay map[string][]byte `json:"overlay"` } +// DriverResponse defines the schema of a response from an external +// driver program, providing the results of a query for package +// metadata. The driver program must write a JSON-encoded +// DriverResponse message to its standard output. +// +// See the package documentation for an overview. +type DriverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the DriverResponse is ignored, and go/packages will fall back + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Compiler and Arch are the arguments to pass to types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports, if populated, will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) + +// findExternalDriver returns the file path of a tool that supplies // the build system package structure, or "" if not found.
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its @@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*driverResponse, error) { - req, err := json.Marshal(driverRequest{ + return func(cfg *Config, words ...string) (*DriverResponse, error) { + req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, BuildFlags: cfg.BuildFlags, @@ -92,7 +131,7 @@ func findExternalDriver(cfg *Config) driver { fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) } - var response driverResponse + var response DriverResponse if err := json.Unmarshal(buf.Bytes(), &response); err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index cd375fbc3..22305d9c9 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -35,23 +35,23 @@ type goTooOldError struct { error } -// responseDeduper wraps a driverResponse, deduplicating its contents. +// responseDeduper wraps a DriverResponse, deduplicating its contents. type responseDeduper struct { seenRoots map[string]bool seenPackages map[string]*Package - dr *driverResponse + dr *DriverResponse } func newDeduper() *responseDeduper { return &responseDeduper{ - dr: &driverResponse{}, + dr: &DriverResponse{}, seenRoots: map[string]bool{}, seenPackages: map[string]*Package{}, } } -// addAll fills in r with a driverResponse. -func (r *responseDeduper) addAll(dr *driverResponse) { +// addAll fills in r with a DriverResponse. +func (r *responseDeduper) addAll(dr *DriverResponse) { for _, pkg := range dr.Packages { r.addPackage(pkg) } @@ -128,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { +func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -146,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { } // Fill in response.Sizes asynchronously if necessary. - var sizeserr error - var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { - sizeswg.Add(1) + errCh := make(chan error) go func() { compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - sizeserr = err response.dr.Compiler = compiler response.dr.Arch = arch - sizeswg.Done() + errCh <- err + }() + defer func() { + if sizesErr := <-errCh; sizesErr != nil { + err = sizesErr + } }() } @@ -208,10 +210,7 @@ extractQueries: } } - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr - } + // (We may yet return an error due to defer.) return response.dr, nil } @@ -266,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries // adhocPackage attempts to load or construct an ad-hoc package for a given // query, if the original call to the driver produced inadequate results. 
-func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { +func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { response, err := state.createDriverResponse(query) if err != nil { return nil, err @@ -357,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string { // createDriverResponse uses the "go list" command to expand the pattern // words and return a response for the specified packages. -func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { +func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -384,7 +383,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkgs := make(map[string]*Package) additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. - response := &driverResponse{ + response := &DriverResponse{ GoVersion: goVersion, } for dec := json.NewDecoder(buf); dec.More(); { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 81e9e6a72..f33b0afc2 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -206,43 +206,6 @@ type Config struct { Overlay map[string][]byte } -// driver is the type for functions that query the build system for the -// packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*driverResponse, error) - -// driverResponse contains the results for a driver query. -type driverResponse struct { - // NotHandled is returned if the request can't be handled by the current - // driver. If an external driver returns a response with NotHandled, the - // rest of the driverResponse is ignored, and go/packages will fallback - // to the next driver. If go/packages is extended in the future to support - // lists of multiple drivers, go/packages will fall back to the next driver. - NotHandled bool - - // Compiler and Arch are the arguments pass of types.SizesFor - // to get a types.Sizes to use when type checking. - Compiler string - Arch string - - // Roots is the set of package IDs that make up the root packages. - // We have to encode this separately because when we encode a single package - // we cannot know if it is one of the roots as that requires knowledge of the - // graph it is part of. - Roots []string `json:",omitempty"` - - // Packages is the full set of packages in the graph. - // The packages are not connected into a graph. - // The Imports if populated will be stubs that only have their ID set. - // Imports will be connected and then type and syntax information added in a - // later pass (see refine). - Packages []*Package - - // GoVersion is the minor version number used by the driver - // (e.g. the go command on the PATH) when selecting .go files. - // Zero means unknown. - GoVersion int -} - // Load loads and returns the Go packages named by the given patterns. // // Config specifies loading options; @@ -291,7 +254,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. // The boolean result indicates that an external driver handled the request. 
-func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) { +func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) { if driver := findExternalDriver(cfg); driver != nil { response, err := driver(cfg, patterns...) if err != nil { @@ -303,7 +266,10 @@ func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, erro } response, err := goListDriver(cfg, patterns...) - return response, false, err + if err != nil { + return nil, false, err + } + return response, false, nil } // A Package describes a loaded Go package. @@ -648,7 +614,7 @@ func newLoader(cfg *Config) *loader { // refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. -func (ld *loader) refine(response *driverResponse) ([]*Package, error) { +func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { roots := response.Roots rootMap := make(map[string]int, len(roots)) for i, root := range roots { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 9bde15e3b..9fffa9ad0 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -224,6 +224,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte // Gather the relevant packages from the manifest. items := make([]GetPackagesItem, r.uint64()) + uniquePkgPaths := make(map[string]bool) for i := range items { pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) @@ -248,6 +249,12 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } items[i].nameIndex = nameIndex + + uniquePkgPaths[pkgPath] = true + } + // Debugging #63822; hypothesis: there are duplicate PkgPaths. + if len(uniquePkgPaths) != len(items) { + reportf("found duplicate PkgPaths while reading export data manifest: %v", items) } // Request packages all at once from the client, diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 1bc92248c..ab0fbb79b 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. 
For more information see the diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 712fef4d0..52d530d7a 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -121,9 +121,9 @@ func (a *Attributes) String() string { return sb.String() } -func str(x any) string { +func str(x any) (s string) { if v, ok := x.(fmt.Stringer); ok { - return v.String() + return fmt.Sprint(v) } else if v, ok := x.(string); ok { return v } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b6377f445..d79560a2e 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_wrapper.go similarity index 57% rename from vendor/google.golang.org/grpc/balancer_conn_wrappers.go rename to vendor/google.golang.org/grpc/balancer_wrapper.go index a4411c22b..b5e30cff0 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -32,21 +32,13 @@ import ( "google.golang.org/grpc/resolver" ) -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle -) - // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the // balancer.Balancer interface. The ClientConn is free to call these methods // concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen synchronously and in order. 
+// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. // // ccBalancerWrapper also implements the balancer.ClientConn interface and is // passed to the Balancer implementations. It invokes unexported methods on the @@ -57,87 +49,75 @@ const ( type ccBalancerWrapper struct { // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks, can therefore be accessed - // without a mutex. - balancer *gracefulswitch.Balancer + // The following fields are only accessed within the serializer or during + // initialization. curBalancerName string + balancer *gracefulswitch.Balancer - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. - mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. - serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool } -// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer -// is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, } - ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) return ccb } // updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. 
- ok := ccb.serializer.Schedule(func(_ context.Context) { - errCh <- ccb.balancer.UpdateClientConnState(*ccs) + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err }) if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") - } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel. - err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) + return nil } - return err -} - -// updateSubConnState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - // Even though it is optional for balancers, gracefulswitch ensures - // opts.StateListener is set, so this cannot ever be nil. - sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) - ccb.mu.Unlock() + return <-errCh } +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } ccb.balancer.ResolverError(err) }) - ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -151,8 +131,10 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } // TODO: Other languages use case-sensitive balancer registries. We should // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. if strings.EqualFold(ccb.curBalancerName, name) { @@ -160,7 +142,6 @@ func (ccb *ccBalancerWrapper) switchTo(name string) { } ccb.buildLoadBalancingPolicy(name) }) - ccb.mu.Unlock() } // buildLoadBalancingPolicy performs the following: @@ -187,115 +168,49 @@ func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { ccb.curBalancerName = builder.Name() } +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine the wrapper has finished shutting down, +// the channel should block on ccb.serializer.Done() without cc.mu held. 
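Since the comments above lean heavily on the serializer, a standalone sketch of the idea may help. This is illustrative only, not grpc's internal grpcsync.CallbackSerializer: callbacks run one at a time, in submission order, on a single goroutine, so the code they call into needs no mutex, and shutdown is observed through a done channel.

```go
package main

import (
	"context"
	"fmt"
)

// serializer runs scheduled callbacks sequentially on one goroutine.
type serializer struct {
	callbacks chan func(context.Context)
	done      chan struct{}
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{
		callbacks: make(chan func(context.Context), 16),
		done:      make(chan struct{}),
	}
	go func() {
		defer close(s.done)
		for {
			select {
			case cb := <-s.callbacks:
				cb(ctx) // callbacks check ctx.Err() to observe shutdown
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}

// Schedule enqueues cb; it returns false once the serializer has shut down.
func (s *serializer) Schedule(cb func(context.Context)) bool {
	select {
	case s.callbacks <- cb:
		return true
	case <-s.done:
		return false
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	s := newSerializer(ctx)

	ran := make(chan struct{})
	s.Schedule(func(context.Context) {
		fmt.Println("runs in order, without locks")
		close(ran)
	})
	<-ran

	cancel() // analogous to serializerCancel in the wrapper
	<-s.done // analogous to blocking on serializer.Done() during close
}
```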
func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. -func (ccb *ccBalancerWrapper) enterIdleMode() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") - ccb.closeBalancer(ccbModeIdle) -} - -// closeBalancer is invoked when the channel is being closed or when it enters -// idle mode upon expiry of idle_timeout. -func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { ccb.mu.Lock() - if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { - ccb.mu.Unlock() - return - } - - ccb.mode = m - done := ccb.serializer.Done() - b := ccb.balancer - ok := ccb.serializer.Schedule(func(_ context.Context) { - // Close the serializer to ensure that no more calls from gRPC are sent - // to the balancer. - ccb.serializerCancel() - // Empty the current balancer name because we don't have a balancer - // anymore and also so that we act on the next call to switchTo by - // creating a new balancer specified by the new resolver. - ccb.curBalancerName = "" - }) - if !ok { - ccb.mu.Unlock() - return - } + ccb.closed = true ccb.mu.Unlock() - - // Give enqueued callbacks a chance to finish before closing the balancer. - <-done - b.Close() -} - -// exitIdleMode is invoked by grpc when the channel exits idle mode either -// because of an RPC or because of an invocation of the Connect() API. This -// recreates the balancer that was closed previously when entering idle mode. -// -// If the channel is not in idle mode, we know for a fact that we are here as a -// result of the user calling the Connect() method on the ClientConn. In this -// case, we can simply forward the call to the underlying balancer, instructing -// it to reconnect to the backends. -func (ccb *ccBalancerWrapper) exitIdleMode() { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed { - // Request to exit idle is a no-op when wrapper is already closed. - ccb.mu.Unlock() - return - } - - if ccb.mode == ccbModeIdle { - // Recreate the serializer which was closed when we entered idle. - ctx, cancel := context.WithCancel(context.Background()) - ccb.serializer = grpcsync.NewCallbackSerializer(ctx) - ccb.serializerCancel = cancel - } - - // The ClientConn guarantees that mutual exclusion between close() and - // exitIdleMode(), and since we just created a new serializer, we can be - // sure that the below function will be scheduled. - done := make(chan struct{}) - ccb.serializer.Schedule(func(_ context.Context) { - defer close(done) - - ccb.mu.Lock() - defer ccb.mu.Unlock() - - if ccb.mode != ccbModeIdle { - ccb.balancer.ExitIdle() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { return } - - // Gracefulswitch balancer does not support a switchTo operation after - // being closed. Hence we need to create a new one here. 
- ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - ccb.mode = ccbModeActive - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") - + ccb.balancer.Close() + ccb.balancer = nil }) - ccb.mu.Unlock() - - <-done + ccb.serializerCancel() } -func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { - ccb.mu.Lock() - defer ccb.mu.Unlock() - return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed +// exitIdle invokes the balancer's exitIdle method in the serializer. +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if ccb.isIdleOrClosed() { - return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") } + ccb.mu.Unlock() if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ac, err := ccb.cc.newAddrConn(addrs, opts) + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) if err != nil { channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err @@ -316,10 +231,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - if ccb.isIdleOrClosed() { - return - } - acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -328,25 +239,39 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - + ccb.mu.Unlock() // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(s.Picker) + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - - ccb.cc.resolveNow(o) + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) } func (ccb *ccBalancerWrapper) Target() string { @@ -364,6 +289,20 @@ type acBalancerWrapper struct { producers map[balancer.ProducerBuilder]*refCountedProducer } +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. 
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. + acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) +} + func (acbw *acBalancerWrapper) String() string { return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) } @@ -377,20 +316,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { - ccb := acbw.ccb - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } // NewStream begins a streaming RPC on the addrConn. If the addrConn is not diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 595480112..e9e97d451 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -430,7 +430,7 @@ type ClientHeader struct { MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` // A single process may be used to run multiple virtual // servers with different identities. - // The authority is the name of such a server identitiy. + // The authority is the name of such a server identity. // It is typically a portion of the URI in the form of // or : . Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index ff7fea102..f6e815e6b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -33,9 +33,7 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" @@ -48,9 +46,9 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( @@ -119,23 +117,8 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// DialContext creates a client connection to the given target. 
By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// newClient returns a new client in idle mode. +func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[*addrConn]struct{}), @@ -143,23 +126,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * czData: new(channelzData), } - // We start the channel off in idle mode, but kick it out of idle at the end - // of this method, instead of waiting for the first RPC. Other gRPC - // implementations do wait for the first RPC to kick the channel out of - // idle. But doing so would be a major behavior change for our users who are - // used to seeing the channel active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, if at all we ever get to do that. - cc.idlenessState = ccIdlenessStateIdle - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.exitIdleCond = sync.NewCond(&cc.mu) + // Apply dial options. disableGlobalOpts := false for _, opt := range opts { if _, ok := opt.(*disableGlobalDialOptions); ok { @@ -177,21 +148,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * for _, opt := range opts { opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) - defer func() { - if err != nil { - cc.Close() - } - }() - - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) - if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -205,10 +164,80 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Register ClientConn with channelz. + cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. 
+ if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc, err := newClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. by making newClient exported. + + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -231,49 +260,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - return nil, err - } - if err = cc.determineAuthority(); err != nil { - return nil, err - } - - if cc.dopts.scChan != nil { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - // This creates the name resolver, load balancer, blocking picker etc. - if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. 
Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. - cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - // A blocking dial blocks until the clientConn is ready. for { s := cc.GetState() @@ -320,8 +306,8 @@ func (cc *ClientConn) addTraceEvent(msg string) { type idler ClientConn -func (i *idler) EnterIdleMode() error { - return (*ClientConn)(i).enterIdleMode() +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() } func (i *idler) ExitIdleMode() error { @@ -329,117 +315,71 @@ func (i *idler) ExitIdleMode() error { } // exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return errConnClosing } - if cc.idlenessState != ccIdlenessStateIdle { - cc.mu.Unlock() - channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. - cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() cc.mu.Unlock() // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. - if err := cc.initResolverWrapper(credsClone); err != nil { + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { return err } - if exitedIdle { - cc.addTraceEvent("exiting idle mode") - } + cc.addTraceEvent("exiting idle mode") return nil } +// initIdleStateLocked initializes common state to how it should be while idle. 
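The refactor above preserves DialContext's public contract. A usage sketch under the usual assumptions (example target, insecure credentials), showing both the lazy default and the blocking variant:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Blocking dial: ctx bounds connection setup. Without WithBlock,
	// DialContext returns immediately and connects in the background.
	conn, err := grpc.DialContext(ctx, "dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	conn.Connect() // no-op here; kicks an idle channel out of idle otherwise
}
```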
+func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + // enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer and any subchannels. -func (cc *ClientConn) enterIdleMode() error { +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() + if cc.conns == nil { cc.mu.Unlock() - return ErrClientConnClosing - } - if cc.idlenessState != ccIdlenessStateActive { - channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) - cc.mu.Unlock() - return nil + return } - // cc.conns == nil is a proxy for the ClientConn being closed. So, instead - // of setting it to nil here, we recreate the map. This also means that we - // don't have to do this when exiting idle mode. conns := cc.conns - cc.conns = make(map[*addrConn]struct{}) - // TODO: Currently, we close the resolver wrapper upon entering idle mode - // and create a new one upon exiting idle mode. This means that the - // `cc.resolverWrapper` field would be overwritten everytime we exit idle - // mode. While this means that we need to hold `cc.mu` when accessing - // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should - // try to do the same for the balancer and picker wrappers too. - cc.resolverWrapper.close() - cc.blockingpicker.enterIdleMode() - cc.balancerWrapper.enterIdleMode() + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() cc.csMgr.updateState(connectivity.Idle) - cc.idlenessState = ccIdlenessStateIdle + cc.addTraceEvent("entering idle mode") + + cc.initIdleStateLocked() + cc.mu.Unlock() - go func() { - cc.addTraceEvent("entering idle mode") - for ac := range conns { - ac.tearDown(errConnIdling) - } - }() - return nil + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } } // validateTransportCredentials performs a series of checks on the configured @@ -649,66 +589,35 @@ type ClientConn struct { dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idle.Manager + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. csMgr *connectivityStateManager - blockingpicker *pickerWrapper + pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector czData *channelzData retryThrottler atomic.Value // Updated from service config. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. 
- firstResolveEvent *grpcsync.Event - // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). -// -// This state tracking is mostly for self-protection. The idlenessManager is -// expected to keep track of the state as well, and is expected not to call into -// the ClientConn unnecessarily. -type ccIdlenessState int8 - -const ( - ccIdlenessStateActive ccIdlenessState = iota - ccIdlenessStateIdle - ccIdlenessStateExitingIdle -) - -func (s ccIdlenessState) String() string { - switch s { - case ccIdlenessStateActive: - return "active" - case ccIdlenessStateIdle: - return "idle" - case ccIdlenessStateExitingIdle: - return "exitingIdle" - default: - return "unknown" - } -} - // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -748,29 +657,15 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.exitIdleMode() + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. - cc.balancerWrapper.exitIdleMode() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. 
- cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -804,6 +699,12 @@ func init() { internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { return cc.csMgr.pubSub.Subscribe(s) } + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.idlenessMgr.ExitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -818,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { } } -func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() - cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -866,7 +766,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. - cc.applyFailingLB(s.ServiceConfig) + cc.applyFailingLBLocked(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -888,15 +788,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// applyFailingLB is akin to configuring an LB policy on the channel which +// applyFailingLBLocked is akin to configuring an LB policy on the channel which // always fails RPCs. Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -// -// Caller must hold cc.mu. -func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -904,14 +802,10 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.balancerWrapper.updateSubConnState(sc, s, err) -} - // Makes a copy of the input addresses slice and clears out the balancer // attributes field. Addresses are passed during subconn creation and address // update operations. In both cases, we will clear the balancer attributes by @@ -926,10 +820,14 @@ func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Ad return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. 
+// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + ac := &addrConn{ state: connectivity.Idle, cc: cc, @@ -941,12 +839,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - return nil, ErrClientConnClosing - } var err error ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") @@ -962,6 +854,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub }, }) + // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.conns[ac] = struct{}{} return ac, nil } @@ -1168,7 +1061,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) @@ -1210,12 +1103,12 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1247,40 +1140,32 @@ func (cc *ClientConn) Close() error { <-cc.csMgr.pubSub.Done() }() + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() + cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker - rWrapper := cc.resolverWrapper - bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. cc.mu.Unlock() + cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. 
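The renames in this area (newAddrConnLocked, resolveNowLocked, applyFailingLBLocked) follow the common Go convention that a "Locked" suffix marks a method whose caller must already hold the mutex. A minimal sketch of the convention, with illustrative names not taken from this codebase:

```go
package main

import "sync"

type registry struct {
	mu    sync.Mutex
	items map[string]int
}

// addLocked requires r.mu to be held by the caller.
func (r *registry) addLocked(key string) {
	if r.items == nil {
		r.items = make(map[string]int)
	}
	r.items[key]++
}

// Add is the public entry point; it takes the lock and delegates.
func (r *registry) Add(key string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.addLocked(key)
}

func main() {
	var r registry
	r.Add("subconn")
}
```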
- if pWrapper != nil { - pWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - if rWrapper != nil { - rWrapper.close() - } - if idlenessMgr != nil { - idlenessMgr.Close() - } + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1301,7 +1186,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1339,7 +1224,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1843,7 +1728,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1975,58 +1860,17 @@ func (cc *ClientConn) determineAuthority() error { } endpoint := cc.parsedTarget.Endpoint() - target := cc.target - switch { - case authorityFromDialOption != "": + if authorityFromDialOption != "" { cc.authority = authorityFromDialOption - case authorityFromCreds != "": + } else if authorityFromCreds != "" { cc.authority = authorityFromCreds - case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): - // TODO: remove when the unix resolver implements optional interface to - // return channel authority. - cc.authority = "localhost" - case strings.HasPrefix(endpoint, ":"): + } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { + cc.authority = auth.OverrideAuthority(cc.parsedTarget) + } else if strings.HasPrefix(endpoint, ":") { cc.authority = "localhost" + endpoint - default: - // TODO: Define an optional interface on the resolver builder to return - // the channel authority given the user's dial target. For resolvers - // which don't implement this interface, we will use the endpoint from - // "scheme://authority/endpoint" as the default authority. - // Escape the endpoint to handle use cases where the endpoint - // might not be a valid authority by default. - // For example an endpoint which has multiple paths like - // 'a/b/c', which is not a valid authority by default. + } else { cc.authority = encodeAuthority(endpoint) } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil } - -// initResolverWrapper creates a ccResolverWrapper, which builds the name -// resolver. This method grabs the lock to assign the newly built resolver -// wrapper to the cc.resolverWrapper field. 
-func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. - cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() - return nil -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 11b106182..08476ad1f 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. +// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 877b7cd21..5dafd34ed 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string { return "tls" } +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + // GetSecurityValue returns security info requested by channelz. func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], + StandardName: cipherSuiteLookup(t.State.CipherSuite), } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { @@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + // NewTLS uses c to construct a TransportCredentials based on TLS. 
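The lookup helper added above derives cipher suite names from crypto/tls at runtime instead of the hand-maintained table removed below. A standard-library-only sketch of the same approach:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// suiteName resolves a cipher suite ID to its name, consulting both the
// secure and insecure suite lists exposed by crypto/tls.
func suiteName(id uint16) string {
	for _, s := range tls.CipherSuites() {
		if s.ID == id {
			return s.Name
		}
	}
	for _, s := range tls.InsecureCipherSuites() {
		if s.ID == id {
			return s.Name
		}
	}
	return fmt.Sprintf("unknown ID: %v", id)
}

func main() {
	fmt.Println(suiteName(tls.TLS_AES_128_GCM_SHA256)) // TLS_AES_128_GCM_SHA256
}
```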
func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } return tc } @@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 1fd0d5c12..ba2426180 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -46,6 +46,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + 
internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -63,7 +64,6 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration - scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions @@ -250,19 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. // @@ -413,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -487,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } @@ -637,13 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, UseProxy: true, + UserAgent: grpcUA, }, - recvBufferPool: nopBufferPool{}, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, } } @@ -680,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // @@ -704,11 +705,13 @@ func WithIdleTimeout(d time.Duration) DialOption { // options are used: WithStatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. 
// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.recvBufferPool = bufferPool }) diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 69d5580b6..5ebf88d71 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. } var registeredCompressor = make(map[string]Compressor) diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da..fed1c011a 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. 
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 4399c3df4..11f91668a 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity @@ -36,6 +39,7 @@ import "sync" type Unbounded struct { c chan any closed bool + closing bool mu sync.Mutex backlog []any } @@ -45,32 +49,32 @@ func NewUnbounded() *Unbounded { return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t any) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return + if b.closing { + return errBufferClosed } if len(b.backlog) == 0 { select { case b.c <- t: - return + return nil default: } } b.backlog = append(b.backlog, t) + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -78,6 +82,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } } @@ -88,18 +94,23 @@ func (b *Unbounded) Load() { // send the next buffered value onto the channel if there is any. // // If the unbounded buffer is closed, the read channel returned by this method -// is closed. +// is closed after all data is drained. func (b *Unbounded) Get() <-chan any { return b.c } -// Close closes the unbounded buffer. +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. 
func (b *Unbounded) Close() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { + if b.closing { return } - b.closed = true - close(b.c) + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 5395e7752..fc094f344 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -31,6 +31,7 @@ import ( "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" ) const ( @@ -58,6 +59,12 @@ func TurnOn() { } } +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + // IsOn returns whether channelz data collection is on. func IsOn() bool { return atomic.LoadInt32(&curState) == 1 diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3cf10ddfb..685a3cb41 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,9 +36,6 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy. - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) // LeastRequestLB is set if we should support the least_request_experimental // LB policy, which can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1c..29f234acb 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -50,46 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". - XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". 
- XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) - // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) - // XDSFederation indicates whether federation support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". - XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 000000000..7f7044e17 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 900917dbe..f7f40a16a 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,7 +20,6 @@ package grpcsync import ( "context" - "sync" "google.golang.org/grpc/internal/buffer" ) @@ -38,8 +37,6 @@ type CallbackSerializer struct { done chan struct{} callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided @@ -65,56 +62,34 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // callbacks to be executed by the serializer. 
It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - cs.closedMu.Lock() - defer cs.closedMu.Unlock() - - if cs.closed { - return false - } - cs.callbacks.Put(f) - return true + return cs.callbacks.Put(f) == nil } func (cs *CallbackSerializer) run(ctx context.Context) { - var backlog []func(context.Context) - defer close(cs.done) + + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. - case callback, ok := <-cs.callbacks.Get(): - if !ok { - return - } + case cb := <-cs.callbacks.Get(): cs.callbacks.Load() - callback.(func(ctx context.Context))(ctx) + cb.(func(context.Context))(ctx) } } - // Fetch pending callbacks if any, and execute them before returning from - // this method and closing cs.done. - cs.closedMu.Lock() - cs.closed = true - backlog = cs.fetchPendingCallbacks() + // Close the buffer to prevent new callbacks from being added. cs.callbacks.Close() - cs.closedMu.Unlock() - for _, b := range backlog { - b(ctx) - } -} -func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { - var backlog []func(context.Context) - for { - select { - case b := <-cs.callbacks.Get(): - backlog = append(backlog, b.(func(context.Context))) - cs.callbacks.Load() - default: - return backlog - } + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index 6c272476e..fe49cb74c 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -26,8 +26,6 @@ import ( "sync" "sync/atomic" "time" - - "google.golang.org/grpc/grpclog" ) // For overriding in unit tests. @@ -39,27 +37,12 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { // and exit from idle mode. type Enforcer interface { ExitIdleMode() error - EnterIdleMode() error -} - -// Manager defines the functionality required to track RPC activity on a -// channel. -type Manager interface { - OnCallBegin() error - OnCallEnd() - Close() + EnterIdleMode() } -type noopManager struct{} - -func (noopManager) OnCallBegin() error { return nil } -func (noopManager) OnCallEnd() {} -func (noopManager) Close() {} - -// manager implements the Manager interface. It uses atomic operations to -// synchronize access to shared state and a mutex to guarantee mutual exclusion -// in a critical section. -type manager struct { +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. @@ -69,8 +52,7 @@ type manager struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. enforcer Enforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. 
- logger grpclog.LoggerV2 + timeout time.Duration // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: @@ -88,57 +70,48 @@ type manager struct { timer *time.Timer } -// ManagerOptions is a collection of options used by -// NewManager. -type ManagerOptions struct { - Enforcer Enforcer - Timeout time.Duration - Logger grpclog.LoggerV2 +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. +func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } } -// NewManager creates a new idleness manager implementation for the -// given idle timeout. -func NewManager(opts ManagerOptions) Manager { - if opts.Timeout == 0 { - return noopManager{} +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return } - m := &manager{ - enforcer: opts.Enforcer, - timeout: int64(opts.Timeout), - logger: opts.Logger, + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() } - m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) - return m + m.timer = timeAfterFunc(d, m.handleIdleTimeout) } -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (m *manager) resetIdleTimer(d time.Duration) { +func (m *Manager) resetIdleTimer(d time.Duration) { m.idleMu.Lock() defer m.idleMu.Unlock() - - if m.timer == nil { - // Only close sets timer to nil. We are done. - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. - m.timer.Reset(d) + m.resetIdleTimerLocked(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (m *manager) handleIdleTimeout() { +func (m *Manager) handleIdleTimeout() { if m.isClosed() { return } if atomic.LoadInt32(&m.activeCallsCount) > 0 { - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) return } @@ -148,24 +121,12 @@ func (m *manager) handleIdleTimeout() { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) - m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) return } - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the - // channel is either in idle mode or is trying to get there. 
- if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both case, reset the timer and return. - m.resetIdleTimer(time.Duration(m.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. if m.tryEnterIdleMode() { // Successfully entered idle mode. No timer needed until we exit idle. return @@ -174,8 +135,7 @@ func (m *manager) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. Undo the attempt to // enter idle, and reset the timer to try again later. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) } // tryEnterIdleMode instructs the channel to enter idle mode. But before @@ -185,36 +145,49 @@ func (m *manager) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (m *manager) tryEnterIdleMode() bool { +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + m.idleMu.Lock() defer m.idleMu.Unlock() if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // An very short RPC could have come in (and also finished) after we + // A very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. - if err := m.enforcer.EnterIdleMode(); err != nil { - m.logger.Errorf("Failed to enter idle mode: %v", err) - return false - } - - // Successfully entered idle mode. + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() m.actuallyIdle = true return true } +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + // OnCallBegin is invoked at the start of every RPC. 
-func (m *manager) OnCallBegin() error {
+func (m *Manager) OnCallBegin() error {
 	if m.isClosed() {
 		return nil
 	}
@@ -227,7 +200,7 @@
 
 	// Channel is either in idle mode or is in the process of moving to idle
 	// mode. Attempt to exit idle mode to allow this RPC.
-	if err := m.exitIdleMode(); err != nil {
+	if err := m.ExitIdleMode(); err != nil {
 		// Undo the increment to calls count, and return an error causing the
 		// RPC to fail.
 		atomic.AddInt32(&m.activeCallsCount, -1)
@@ -238,28 +211,30 @@
 	return nil
 }
 
-// exitIdleMode instructs the channel to exit idle mode.
-//
-// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
-func (m *manager) exitIdleMode() error {
+// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
+// internal state.
+func (m *Manager) ExitIdleMode() error {
+	// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
 	m.idleMu.Lock()
 	defer m.idleMu.Unlock()
-	if !m.actuallyIdle {
-		// This can happen in two scenarios:
+	if m.isClosed() || !m.actuallyIdle {
+		// This can happen in three scenarios:
 		// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
 		// tryEnterIdleMode(). But before the latter could grab the lock, an RPC
 		// came in and OnCallBegin() noticed that the calls count is negative.
 		// - Channel is in idle mode, and multiple new RPCs come in at the same
 		// time, all of them notice a negative calls count in OnCallBegin and get
 		// here. The first one to get the lock would get the channel to exit idle.
+		// - Channel is not in idle mode, and the user calls Connect which calls
+		// m.ExitIdleMode.
 		//
-		// Either way, nothing to do here.
+		// In any case, there is nothing to do here.
 		return nil
 	}
 
 	if err := m.enforcer.ExitIdleMode(); err != nil {
-		return fmt.Errorf("channel failed to exit idle mode: %v", err)
+		return fmt.Errorf("failed to exit idle mode: %w", err)
 	}
 
 	// Undo the idle entry process. This also respects any new RPC attempts.
@@ -267,12 +242,12 @@
 	m.actuallyIdle = false
 
 	// Start a new timer to fire after the configured idle timeout.
-	m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
+	m.resetIdleTimerLocked(m.timeout)
 	return nil
 }
 
 // OnCallEnd is invoked at the end of every RPC.
-func (m *manager) OnCallEnd() {
+func (m *Manager) OnCallEnd() {
 	if m.isClosed() {
 		return
 	}
@@ -287,15 +262,17 @@
 	atomic.AddInt32(&m.activeCallsCount, -1)
 }
 
-func (m *manager) isClosed() bool {
+func (m *Manager) isClosed() bool {
 	return atomic.LoadInt32(&m.closed) == 1
 }
 
-func (m *manager) Close() {
+func (m *Manager) Close() {
 	atomic.StoreInt32(&m.closed, 1)
 
 	m.idleMu.Lock()
-	m.timer.Stop()
-	m.timer = nil
+	if m.timer != nil {
+		m.timer.Stop()
+		m.timer = nil
+	}
 	m.idleMu.Unlock()
 }
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index c8a8c76d6..6c7ea6a53 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -57,7 +57,7 @@ var (
	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
	// stored in the passed in attributes. This is set by
	// credentials/xds/xds.go.
- GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. @@ -68,11 +68,11 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. CanonicalString any // func (codes.Code) string - // DrainServerTransports initiates a graceful close of existing connections - // on a gRPC server accepted on the provided listener address. An - // xDS-enabled server invokes this method on a grpc.Server when a particular - // listener moves to "not-serving" mode. - DrainServerTransports any // func(*grpc.Server, string) + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. @@ -175,6 +175,27 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found + // error for a given resource type and name. This is usually triggered when + // the associated watch timer fires. For testing purposes, having this + // function makes events more predictable than relying on timer events. + TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + + // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton + // to invoke resource not found for a resource type name and resource name. + TriggerXDSResourceNameNotFoundClient any // func(string, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) ) // HealthChecker defines the signature of the client-side LB channel health checking function. 
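The `internal.go` hunk above extends a pattern grpc-go uses throughout this diff to break import cycles: a hook is declared in the `internal` package as an untyped `any` variable with its intended signature recorded only in a comment, the implementing package assigns it from an `init` function (as the `channelz` hunk earlier does for `ChannelzTurnOffForTesting`), and the consuming package recovers the typed function with a type assertion (as `http2_client.go` does below for `FromOutgoingContextRaw`). A minimal, self-contained sketch of that wiring follows; the hook and function names here are hypothetical stand-ins, not the actual gRPC identifiers:

```go
package main

import (
	"fmt"
	"strings"
)

// CanonicalizeForTesting stands in for a hook in a shared internal package
// that both the implementing and consuming packages can import without
// creating a cycle. It is deliberately typed as `any`; the expected
// signature lives only in this comment, mirroring internal/internal.go.
var CanonicalizeForTesting any // func(string) string

// The implementing side registers its concrete function at init time,
// mirroring the init() added to internal/channelz/funcs.go in this diff.
func init() {
	CanonicalizeForTesting = func(s string) string {
		return strings.ToUpper(strings.TrimSpace(s))
	}
}

func main() {
	// The consuming side asserts the documented signature and then calls the
	// hook, mirroring metadataFromOutgoingContextRaw in http2_client.go. In
	// the real code the assertion is done once, in a package-level var.
	canonicalize := CanonicalizeForTesting.(func(string) string)
	fmt.Println(canonicalize("  hello  ")) // Output: HELLO
}
```

Because the consuming packages perform the assertion during package initialization, a missing or mis-typed hook panics at process start rather than surfacing later on an RPC path.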
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 99e1e5b36..b66dcb213 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,7 +23,6 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" "net" "os" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,15 +47,11 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? -var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer -) - func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( @@ -70,23 +66,6 @@ const ( txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. "::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. - minDNSResRate = 30 * time.Second -) - var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer @@ -94,7 +73,11 @@ var addressDialer = func(address string) func(context.Context, string, string) ( } } -var newNetResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -104,7 +87,7 @@ var newNetResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: addressDialer(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -142,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.URL.Host == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = newNetResolver(target.URL.Host) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -161,12 +140,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. 
type deadResolver struct{}
@@ -178,7 +151,7 @@
 type dnsResolver struct {
 	host     string
 	port     string
-	resolver netResolver
+	resolver internal.NetResolver
 	ctx      context.Context
 	cancel   context.CancelFunc
 	cc       resolver.ClientConn
@@ -223,29 +196,27 @@ func (d *dnsResolver) watcher() {
 			err = d.cc.UpdateState(*state)
 		}
 
-		var timer *time.Timer
+		var waitTime time.Duration
 		if err == nil {
 			// Success resolving, wait for the next ResolveNow. However, also wait 30
 			// seconds at the very least to prevent constantly re-resolving.
 			backoffIndex = 1
-			timer = newTimerDNSResRate(minDNSResRate)
+			waitTime = internal.MinResolutionRate
 			select {
 			case <-d.ctx.Done():
-				timer.Stop()
 				return
 			case <-d.rn:
 			}
 		} else {
 			// Poll on an error found in DNS Resolver or an error received from
 			// ClientConn.
-			timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
+			waitTime = backoff.DefaultExponential.Backoff(backoffIndex)
 			backoffIndex++
 		}
 		select {
 		case <-d.ctx.Done():
-			timer.Stop()
 			return
-		case <-timer.C:
+		case <-internal.TimeAfterFunc(waitTime):
 		}
 	}
 }
@@ -387,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) {
 // target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
 func parseTarget(target, defaultPort string) (host, port string, err error) {
 	if target == "" {
-		return "", "", errMissingAddr
+		return "", "", internal.ErrMissingAddr
 	}
 	if ip := net.ParseIP(target); ip != nil {
 		// target is an IPv4 or IPv6(without brackets) address
@@ -397,7 +368,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
 		if port == "" {
 			// If the port field is empty (target ends with colon), e.g. "[::1]:",
 			// this is an error.
-			return "", "", errEndsWithColon
+			return "", "", internal.ErrEndsWithColon
 		}
 		// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
 		if host == "" {
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
new file mode 100644
index 000000000..c7fc557d0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the dns resolver package.
+package internal
+
+import (
+	"context"
+	"errors"
+	"net"
+	"time"
+)
+
+// NetResolver groups the methods on net.Resolver that are used by the DNS
+// resolver implementation. This allows the default net.Resolver instance to be
+// overridden from tests.
+type NetResolver interface {
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+	LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+var (
+	// ErrMissingAddr is the error returned when building a DNS resolver when
+	// the provided target name is empty.
+ ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. "::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // MinResolutionRate is the minimum rate at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionRate = 30 * time.Second + + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. + AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 160911687..27cd81af9 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -61,6 +61,10 @@ func (b *builder) Scheme() string { return b.scheme } +func (b *builder) OverrideAuthority(resolver.Target) string { + return "localhost" +} + type nopResolver struct { } diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 4cf85cad9..03ef2fedd 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,6 +43,34 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. 
func New(c codes.Code, msg string) *Status {
	return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
new file mode 100644
index 000000000..4f347edd4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
@@ -0,0 +1,29 @@
+//go:build !unix && !windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+)
+
+// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{}
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
new file mode 100644
index 000000000..078137b7f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -0,0 +1,54 @@
+//go:build unix
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 000000000..fd7d43a89
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3fa..a9d70e2a1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -75,11 +75,25 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, @@ -134,6 +148,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -165,7 +181,13 @@ func (ht *serverHandlerTransport) Close(err error) { }) } -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. @@ -220,18 +242,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue @@ -243,6 +267,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +312,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). 
+// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -344,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. - - ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -367,34 +390,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ht.Close(errors.New("request is done processing")) }() + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} - } - ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) - for _, sh := range ht.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: ht.RemoteAddr(), - Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index badab8acf..c33ac5961 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpclog" @@ -43,7 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" - "google.golang.org/grpc/internal/syscall" + isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -58,6 +59,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // http2Client implements the ClientTransport interface with HTTP2. 
type http2Client struct { lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. @@ -176,7 +179,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -262,7 +265,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -493,8 +496,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } } @@ -566,7 +570,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. @@ -1321,10 +1325,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - if streamID > id && streamID <= upperLimit { - atomic.StoreUint32(&stream.unprocessed, 1) - streamsToClose = append(streamsToClose, stream) - } + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) } } t.mu.Unlock() @@ -1399,7 +1401,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1435,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1548,14 +1543,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) // If client received END_STREAM from server while stream was still active, // send RST_STREAM. 
rstStream := s.getState() == streamActive - t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index c06db679d..f6bac0e8a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -68,18 +68,15 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -243,16 +240,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, @@ -267,8 +266,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, bufferPool: newBufferPool(), } t.logger = prefixLoggerForServerTransport(t) - // Add peer information to the http2server context. 
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -277,15 +274,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.stats { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) if err != nil { return nil, err } @@ -334,7 +323,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler t.loopy.run() - close(t.writerDone) + close(t.loopyWriterDone) }() go t.keepalive() return t, nil @@ -342,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -369,10 +358,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( // if false, content-type was missing or invalid @@ -511,9 +501,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } // Attach the received metadata to the context. 
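
The hunks above replace http2_server's separate remoteAddr/localAddr/authInfo fields with a single stored peer.Peer, and handler_server.go and http2_client.go now populate the new LocalAddr field as well. A minimal, self-contained sketch of how this surfaces through the public peer package (the addresses below are made up for illustration; a real Peer is built by the transport from the accepted connection):

```go
package main

import (
	"context"
	"fmt"
	"net"

	"google.golang.org/grpc/peer"
)

func main() {
	// The transport normally constructs this Peer from the net.Conn; we build
	// one by hand only to show the context round-trip.
	p := &peer.Peer{
		Addr:      &net.TCPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 54321}, // remote end
		LocalAddr: &net.TCPAddr{IP: net.IPv4(192, 0, 2, 2), Port: 443},   // field added by this change
	}
	ctx := peer.NewContext(context.Background(), p)

	// Inside an RPC handler, peer.FromContext recovers what the transport stored.
	if got, ok := peer.FromContext(ctx); ok {
		fmt.Printf("remote=%v local=%v\n", got.Addr, got.LocalAddr)
	}
}
```
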
@@ -561,7 +551,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -592,19 +582,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) - for _, sh := range t.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: mdata.Copy(), - } - sh.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -630,8 +607,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + <-t.loopyWriterDone + close(t.readerDone) + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() @@ -665,7 +645,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(ctx, frame, handle); err != nil { t.Close(err) break } @@ -980,7 +960,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - return status.Convert(err).Err() + switch e := err.(type) { + case ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + return status.Convert(err).Err() + } } return nil } @@ -1053,12 +1038,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } @@ -1240,10 +1228,6 @@ func (t *http2Server) Close(err error) { for _, s := range streams { s.cancel() } - for _, sh := range t.stats { - connEnd := &stats.ConnEnd{} - sh.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. @@ -1309,10 +1293,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() @@ -1395,11 +1375,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1431,10 +1411,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -func (t *http2Server) getPeer() *peer.Peer { +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil } } @@ -1459,6 +1441,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. -func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 195814008..dc29d590e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -34,12 +34,9 @@ import ( "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -88,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -103,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. 
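
With grpc-status-details-bin dropped from the reserved-header list, the client now surfaces it like ordinary metadata and defers decoding to istatus.NewWithProto instead of eagerly unmarshaling it in operateHeaders. A rough sketch of what actually travels in that trailer, assuming the unpadded base64 encoding that encodeBinHeader applies to "-bin" keys (the QuotaFailure detail is only an example payload, not part of this diff):

```go
package main

import (
	"encoding/base64"
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
)

func main() {
	// A server handler returns a rich status; WithDetails packs the detail
	// messages into the google.rpc.Status proto.
	st, err := status.New(codes.ResourceExhausted, "quota exceeded").
		WithDetails(&errdetails.QuotaFailure{})
	if err != nil {
		panic(err)
	}

	// On the wire the details ride in the grpc-status-details-bin trailer as
	// the marshaled google.rpc.Status, base64-encoded without padding.
	raw, err := proto.Marshal(st.Proto())
	if err != nil {
		panic(err)
	}
	fmt.Println(base64.RawStdEncoding.EncodeToString(raw))
}
```
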
@@ -154,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 415961987..24fa10325 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 74a811fc0..b7b8fec18 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" @@ -265,7 +266,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -425,6 +427,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. 
func (s *Stream) Method() string { return s.method @@ -437,6 +445,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -698,7 +712,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -717,8 +731,8 @@ type ServerTransport interface { // handlers will be terminated asynchronously. Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf12..1e9485fd6 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -25,8 +25,14 @@ import ( "context" "fmt" "strings" + + "google.golang.org/grpc/internal" ) +func init() { + internal.FromOutgoingContextRaw = fromOutgoingContextRaw +} + // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. @@ -153,14 +159,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -203,7 +211,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Key must be lower-case. +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. // // # Experimental // @@ -219,33 +228,29 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. 
-		if strings.ToLower(k) == key {
+		// Case insensitive comparison: MD is a map, and there's no guarantee
+		// that the MD attached to the context is created using our helper
+		// functions.
+		if strings.EqualFold(k, key) {
 			return copyOf(v)
 		}
 	}
 	return nil
 }
 
-// the returned slice must not be modified in place
 func copyOf(v []string) []string {
 	vals := make([]string, len(v))
 	copy(vals, v)
 	return vals
 }
 
-// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
+// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
 //
 // Remember to perform strings.ToLower on the keys, for both the returned MD (MD
 // is a map, there's no guarantee it's created using our helper functions) and
 // the extra kv pairs (AppendToOutgoingContext doesn't turn them into
 // lowercase).
-//
-// This is intended for gRPC-internal use ONLY. Users should use
-// FromOutgoingContext instead.
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
 	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
 	if !ok {
 		return nil, nil, false
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index e01d219ff..a821ff9b2 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -32,6 +32,8 @@ import (
 type Peer struct {
 	// Addr is the peer address.
 	Addr net.Addr
+	// LocalAddr is the local address.
+	LocalAddr net.Addr
 	// AuthInfo is the authentication information of the transport.
 	// It is nil if there is no transport security being used.
 	AuthInfo credentials.AuthInfo
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 236837f41..bf56faa76 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -37,7 +37,6 @@ import (
 type pickerWrapper struct {
 	mu            sync.Mutex
 	done          bool
-	idle          bool
 	blockingCh    chan struct{}
 	picker        balancer.Picker
 	statsHandlers []stats.Handler // to record blocking picker calls
@@ -53,11 +52,7 @@ func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
 	pw.mu.Lock()
-	if pw.done || pw.idle {
-		// There is a small window where a picker update from the LB policy can
-		// race with the channel going to idle mode. If the picker is idle here,
-		// it is because the channel asked it to do so, and therefore it is sage
-		// to ignore the update from the LB policy.
+	if pw.done {
 		pw.mu.Unlock()
 		return
 	}
@@ -210,23 +205,15 @@ func (pw *pickerWrapper) close() {
 	close(pw.blockingCh)
 }
 
-func (pw *pickerWrapper) enterIdleMode() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	if pw.done {
-		return
-	}
-	pw.idle = true
-}
-
-func (pw *pickerWrapper) exitIdleMode() {
+// reset clears the pickerWrapper and prepares it for being used again when idle
+// mode is exited.
+func (pw *pickerWrapper) reset() {
 	pw.mu.Lock()
 	defer pw.mu.Unlock()
 	if pw.done {
 		return
 	}
 	pw.blockingCh = make(chan struct{})
-	pw.idle = false
 }
 
 // dropError is a wrapper error that indicates the LB policy wishes to drop the
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index 2e9cf66b4..5128f9364 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -25,7 +25,6 @@ import (
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/internal/envconfig"
 	internalgrpclog "google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/internal/pretty"
@@ -65,19 +64,6 @@ type pfConfig struct {
 }
 
 func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
-	if !envconfig.PickFirstLBConfig {
-		// Prior to supporting loadbalancing configuration, the pick_first LB
-		// policy did not implement the balancer.ConfigParser interface. This
-		// meant that if a non-empty configuration was passed to it, the service
-		// config unmarshaling code would throw a warning log, but would
-		// continue using the pick_first LB policy. The code below ensures the
-		// same behavior is retained if the env var is not set.
-		if string(js) != "{}" {
-			logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js))
-		}
-		return nil, nil
-	}
-
 	var cfg pfConfig
 	if err := json.Unmarshal(js, &cfg); err != nil {
 		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
new file mode 100644
index 000000000..14aa6f20a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+//
+// Deprecated: this package is imported by grpc and should not need to be
+// imported directly by users.
+package dns
+
+import (
+	"google.golang.org/grpc/internal/resolver/dns"
+	"google.golang.org/grpc/resolver"
+)
+
+// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
+//
+// Deprecated: import grpc and use resolver.Get("dns") instead.
+func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index 804be887d..ada5b9bb7 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -136,3 +136,116 @@ func (a *AddressMap) Values() []any { } return ret } + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. 
+func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 11384e228..adf89dd9c 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -240,11 +240,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -286,6 +281,11 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. @@ -314,3 +314,13 @@ type Resolver interface { // Close closes the resolver. Close() } + +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically in line, and must keep it unchanged. + OverrideAuthority(Target) string +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index d68330560..000000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,247 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. 
-type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. - serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. - curState resolver.State - - // mu guards access to the below fields. - mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} - -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. -} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) - ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err - } - - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. - ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() - - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. - if ccr.closed || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done() - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. 
- go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. -func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } - } - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. - return nil - } - return <-errCh -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. -func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. 
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 000000000..c79bab121 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. 
+func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index b7723aa09..a4b6bc687 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -640,14 +640,18 @@ func encode(c baseCodec, msg any) ([]byte, error) { return b, nil } -// compress returns the input bytes compressed by compressor or cp. If both -// compressors are nil, returns nil. +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { if compressor == nil && cp == nil { return nil, nil } + if len(in) == 0 { + return nil, nil + } wrapErr := func(err error) error { return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eeae92fbe..e89c5ac61 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -70,9 +70,10 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } - internal.DrainServerTransports = func(srv *Server, addr string) { - srv.drainServerTransports(addr) + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) } + internal.ServerFromContext = serverFromContext internal.AddGlobalServerOptions = func(opt ...ServerOption) { globalServerOptions = append(globalServerOptions, opt...) 
 	}
@@ -81,6 +82,7 @@ func init() {
 	}
 	internal.BinaryLogger = binaryLogger
 	internal.JoinServerOptions = newJoinServerOption
+	internal.RecvBufferPool = recvBufferPool
 }
 
 var statusOK = status.New(codes.OK, "")
@@ -134,12 +136,14 @@ type Server struct {
 	quit               *grpcsync.Event
 	done               *grpcsync.Event
 	channelzRemoveOnce sync.Once
-	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop
+	serveWG            sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
+	handlersWG         sync.WaitGroup // counts active method handler goroutines
 
 	channelzID *channelz.Identifier
 	czData     *channelzData
 
-	serverWorkerChannel chan func()
+	serverWorkerChannel      chan func()
+	serverWorkerChannelClose func()
 }
 
 type serverOptions struct {
@@ -170,6 +174,7 @@ type serverOptions struct {
 	headerTableSize   *uint32
 	numServerWorkers  uint32
 	recvBufferPool    SharedBufferPool
+	waitForHandlers   bool
 }
 
 var defaultServerOptions = serverOptions{
@@ -567,6 +572,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
 	})
 }
 
+// WaitForHandlers causes Stop to wait until all outstanding method handlers have
+// exited before returning. If false, Stop will return as soon as all
+// connections have closed, but method handlers may still be running. By
+// default, Stop does not wait for method handlers to return.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WaitForHandlers(w bool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.waitForHandlers = w
+	})
+}
+
 // RecvBufferPool returns a ServerOption that configures the server
 // to use the provided shared buffer pool for parsing incoming messages. Depending
 // on the application's workload, this could result in reduced memory allocation.
@@ -578,11 +598,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
 // options are used: StatsHandler, EnableTracing, or binary logging. In such
 // cases, the shared buffer pool will be ignored.
 //
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
+// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
+// v1.60.0 or later.
 func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
+	return recvBufferPool(bufferPool)
+}
+
+func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.recvBufferPool = bufferPool
 	})
@@ -616,15 +638,14 @@ func (s *Server) serverWorker() {
 // connections to reduce the time spent overall on runtime.morestack.
 func (s *Server) initServerWorkers() {
 	s.serverWorkerChannel = make(chan func())
+	s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
+		close(s.serverWorkerChannel)
+	})
 	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
 		go s.serverWorker()
 	}
 }
 
-func (s *Server) stopServerWorkers() {
-	close(s.serverWorkerChannel)
-}
-
 // NewServer creates a gRPC server which has no service registered and has not
 // started to accept requests yet.
 func NewServer(opt ...ServerOption) *Server {
@@ -806,6 +827,18 @@ func (l *listenSocket) Close() error {
 // Serve returns when lis.Accept fails with fatal errors. lis will be closed when
 // this method returns.
 // Serve will return a non-nil error unless Stop or GracefulStop is called.
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s.
To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -913,24 +946,21 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + if !s.addConn(lisAddr, st) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } -func (s *Server) drainServerTransports(addr string) { - s.mu.Lock() - conns := s.conns[addr] - for st := range conns { - st.Drain("") - } - s.mu.Unlock() -} - // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { @@ -971,19 +1001,32 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close(errors.New("finished serving streams for the server transport")) - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) streamQuota.acquire() f := func() { defer streamQuota.release() - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) + defer s.handlersWG.Done() + s.handleStream(st, stream) } if s.opts.numServerWorkers > 0 { @@ -995,14 +1038,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } } go f() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1046,31 +1082,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { @@ -1133,7 +1145,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1152,7 +1164,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1194,7 +1206,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1208,7 +1220,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1240,7 +1252,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1262,7 +1274,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1348,7 +1359,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1362,7 +1373,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1370,7 +1381,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1395,7 +1406,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1403,7 +1414,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1418,7 +1429,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1445,8 +1456,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1460,8 +1471,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1479,7 +1490,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1521,7 +1532,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1535,10 +1546,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, @@ -1574,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1616,7 +1627,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1694,7 +1705,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1712,53 +1723,87 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + ctx = contextWithServer(ctx, s) + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.Peer().Addr, + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } service := sm[:pos] method := 
sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1767,19 +1812,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -1834,62 +1879,72 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() + s.stop(false) +} - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. 
s.mu.Unlock() + s.serveWG.Wait() - for lis := range listeners { - lis.Close() + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } - for _, cs := range conns { - for st := range cs { - st.Close(errors.New("Server.Stop called")) - } + + for len(s.conns) != 0 { + s.cv.Wait() } + s.conns = nil + if s.opts.numServerWorkers > 0 { - s.stopServerWorkers() + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() } - s.mu.Lock() if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } } +} - for lis := range s.lis { - lis.Close() - } - s.lis = nil +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { @@ -1898,22 +1953,14 @@ func (s *Server) GracefulStop() { } s.drain = true } +} - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1927,11 +1974,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. 
+ return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index b14b2fbea..d621f52b1 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -48,6 +48,8 @@ import ( "google.golang.org/grpc/status" ) +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // StreamHandler defines the handler called by gRPC server to complete the // execution of a streaming RPC. // @@ -184,7 +186,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // when the RPC completes. opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa40..07f012576 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 724ad2102..1ad1ba2ad 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.58.3" +const Version = "1.61.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bbc9e2e3c..5da38a409 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -35,7 +35,6 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ - golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell @@ -77,15 +76,19 @@ fi not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. 
-git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' # - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -93,13 +96,15 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - gofmt, goimports, golint (with exceptions for generated code), go vet, -# go mod tidy. +# - gofmt, goimports, go vet, go mod tidy. # Perform these checks on each module inside gRPC. for MOD_FILE in $(find . -name 'go.mod'); do MOD_DIR=$(dirname ${MOD_FILE}) @@ -107,7 +112,6 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ @@ -116,94 +120,71 @@ for MOD_FILE in $(find . -name 'go.mod'); do done # - Collection of static analysis checks -# -# TODO(dfawley): don't use deprecated functions in examples or first-party -# plugins. -# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true -# Error if anything other than deprecation warnings are printed. -not grep -v "is deprecated:.*SA1019" "${SC_OUT}" -# Only ignore the following deprecated types/fields/functions. 
-not grep -Fv '.CredsBundle -.HeaderMap -.Metadata is deprecated: use Attributes -.NewAddress -.NewServiceConfig -.Type is deprecated: use Attributes -BuildVersion is deprecated -balancer.ErrTransientFailure -balancer.Picker -extDesc.Filename is deprecated -github.com/golang/protobuf/jsonpb is deprecated -grpc.CallCustomCodec -grpc.Code -grpc.Compressor -grpc.CustomCodec -grpc.Decompressor -grpc.MaxMsgSize -grpc.MethodConfig -grpc.NewGZIPCompressor -grpc.NewGZIPDecompressor -grpc.RPCCompressor -grpc.RPCDecompressor -grpc.ServiceConfig -grpc.WithCompressor -grpc.WithDecompressor -grpc.WithDialer -grpc.WithMaxMsgSize -grpc.WithServiceConfig -grpc.WithTimeout -http.CloseNotifier -info.SecurityVersion -proto is deprecated -proto.InternalMessageInfo is deprecated -proto.EnumName is deprecated -proto.ErrInternalBadWireType is deprecated -proto.FileDescriptor is deprecated -proto.Marshaler is deprecated -proto.MessageType is deprecated -proto.RegisterEnum is deprecated -proto.RegisterFile is deprecated -proto.RegisterType is deprecated -proto.RegisterExtension is deprecated -proto.RegisteredExtension is deprecated -proto.RegisteredExtensions is deprecated -proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated +staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true + +# Error for anything other than checks that need exclusions. +grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" + +# Exclude underscore checks for generated code. +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' + +# Error for duplicate imports not including grpc protos. +grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +grpc_testing_not_regenerate +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. +BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. - fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. 
- if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then - echo "Package $1 (with ${fileCount} files) is missing package comment" - return 1 - fi -} -lint_package_comment() { - set +ex - - count=0 - for i in $(go list ./...); do - lint_package_comment_per_package "$i" - ((count += $?)) - done - - set -ex - return $count -} -lint_package_comment +UpdateAddresses is deprecated: +UpdateSubConnState is deprecated: +balancer.ErrTransientFailure is deprecated: +grpc/reflection/v1alpha/reflection.proto +XXXXX xDS deprecated fields we support +.ExactMatch +.PrefixMatch +.SafeRegexMatch +.SuffixMatch +GetContainsMatch +GetExactMatch +GetMatchSubjectAltNames +GetPrefixMatch +GetSafeRegexMatch +GetSuffixMatch +GetTlsCertificateCertificateProviderInstance +GetValidationContextCertificateProviderInstance +XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md b/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md new file mode 100644 index 000000000..8e6e91323 --- /dev/null +++ b/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md @@ -0,0 +1,84 @@ +# v4.0.1 + +## Fixed + + - An attacker could send a JWE containing compressed data that used large + amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. + Those functions now return an error if the decompressed data would exceed + 250kB or 10x the compressed size (whichever is larger). Thanks to + Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) + for reporting. + +# v4.0.0 + +This release makes some breaking changes in order to more thoroughly +address the vulnerabilities discussed in [Three New Attacks Against JSON Web +Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot +token". + +## Changed + + - Limit JWT encryption types (exclude password or public key types) (#78) + - Enforce minimum length for HMAC keys (#85) + - jwt: match any audience in a list, rather than requiring all audiences (#81) + - jwt: accept only Compact Serialization (#75) + - jws: Add expected algorithms for signatures (#74) + - Require specifying expected algorithms for ParseEncrypted, + ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, + jwt.ParseSignedAndEncrypted (#69, #74) + - Usually there is a small, known set of appropriate algorithms for a program + to use and it's a mistake to allow unexpected algorithms. For instance the + "billion hash attack" relies in part on programs accepting the PBES2 + encryption algorithm and doing the necessary work even if they weren't + specifically configured to allow PBES2. + - Revert "Strip padding off base64 strings" (#82) + - The specs require base64url encoding without padding. + - Minimum supported Go version is now 1.21 + +## Added + + - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. + - These allow parsing a specific serialization, as opposed to ParseSigned and + ParseEncrypted, which try to automatically detect which serialization was + provided. It's common to require a specific serialization for a specific + protocol - for instance JWT requires Compact serialization. + +[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf + +# v3.0.3 + +## Fixed + + - Limit decompression output size to prevent a DoS. Backport from v4.0.1. 
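The v4.0.0 entries above make callers enumerate the algorithms they are willing to accept. A minimal sketch of what that looks like for a consumer of the v4 module (this is an editorial illustration, not part of the vendored patch; parseToken is an illustrative name, and the exact v4 signatures are worth re-checking against the release being vendored):

package jwtparse

import (
	jose "github.com/go-jose/go-jose/v4"
	"github.com/go-jose/go-jose/v4/jwt"
)

// parseToken parses a compact-serialized signed JWT. Signature verification
// still happens later, e.g. via tok.Claims(key, &claims).
func parseToken(raw string) (*jwt.JSONWebToken, error) {
	// v4 rejects any token whose header names an algorithm outside this
	// allow-list, so a sender cannot steer the parser toward an unexpected
	// algorithm (the "billion hash attack" relied on peers doing PBES2 work
	// without being configured for it).
	return jwt.ParseSigned(raw, []jose.SignatureAlgorithm{jose.RS256, jose.ES256})
}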
+ +# v3.0.2 + +## Fixed + + - DecryptMulti: handle decompression error (#19) + +## Changed + + - jwe/CompactSerialize: improve performance (#67) + - Increase the default number of PBKDF2 iterations to 600k (#48) + - Return the proper algorithm for ECDSA keys (#45) + +## Added + + - Add Thumbprint support for opaque signers (#38) + +# v3.0.1 + +## Fixed + + - Security issue: an attacker specifying a large "p2c" value can cause + JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large + amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the + disclosure and to Tom Tervoort for originally publishing the category of attack. + https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf + +# v2.6.3 + +## Fixed + + - Limit decompression output size to prevent a DoS. Backport from v4.0.1. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/README.md index 46b02d61d..b877f412c 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/README.md +++ b/vendor/gopkg.in/go-jose/go-jose.v2/README.md @@ -1,118 +1,4 @@ -# Go JOSE +# go-jose v2 -[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v1) -[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) -[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) -[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=v2)](https://travis-ci.org/go-jose/go-jose) -[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/go-jose/go-jose) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. This includes support for JSON Web Encryption, -JSON Web Signature, and JSON Web Token standards. - -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - -## Overview - -The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519). -Tables of supported algorithms are shown below. The library supports both -the compact and full serialization formats, and has optional support for -multiple recipients. It also comes with a small command-line utility -([`jose-util`](https://github.com/go-jose/go-jose/tree/v2/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). -This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. - -### Versions - -We use [gopkg.in](https://gopkg.in) for versioning. 
- -[Version 2](https://gopkg.in/go-jose/go-jose.v2) -([branch](https://github.com/go-jose/go-jose/tree/v2), -[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current version: - - import "gopkg.in/go-jose/go-jose.v2" - -The old `v1` branch ([go-jose.v1](https://gopkg.in/go-jose/go-jose.v1)) will -still receive backported bug fixes and security fixes, but otherwise -development is frozen. All new feature development takes place on the `v2` -branch. Version 2 also contains additional sub-packages such as the -[jwt](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) implementation -contributed by [@shaxbee](https://github.com/shaxbee). - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) -standard where possible. The Godoc reference has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - Ed25519 | EdDSA2 - -2. Only available in version 2 of the package - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which -allows attaching a key id. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey) - AES, HMAC | []byte - -1. Only available in version 2 of the package - -## Examples - -[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v1) -[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) - -Examples can be found in the Godoc -reference for this package. The -[`jose-util`](https://github.com/go-jose/go-jose/tree/v2/jose-util) -subdirectory also contains a small command-line utility which might be useful -as an example. +Version 2 of this library is no longer supported. [Please use v4 +instead](https://pkg.go.dev/github.com/go-jose/go-jose/v4). 
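Since the rewritten README above now simply points v2 users at v4, a hedged migration sketch may help reviewers of this bump (again an editorial illustration, not part of the vendored patch; decryptJWE is an illustrative name, and the ParseEncrypted allow-list parameters follow the v4 docs and should be verified against the vendored release):

package josemigration

import (
	"crypto/rsa"

	jose "github.com/go-jose/go-jose/v4"
)

// decryptJWE decrypts a compact-serialized JWE under v4's stricter API.
func decryptJWE(raw string, key *rsa.PrivateKey) ([]byte, error) {
	// v2's jose.ParseEncrypted(raw) accepted whatever algorithms the sender
	// chose; v4 makes the caller enumerate acceptable key-encryption and
	// content-encryption algorithms up front.
	obj, err := jose.ParseEncrypted(raw,
		[]jose.KeyAlgorithm{jose.RSA_OAEP_256},
		[]jose.ContentEncryption{jose.A256GCM})
	if err != nil {
		return nil, err
	}
	// Decrypt also applies the decompression cap described in the v4.0.1
	// changelog entry above: it errors if the output would exceed the larger
	// of 250kB or 10x the compressed size.
	return obj.Decrypt(key)
}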
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go b/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go index 3ca79cc26..43f9ce2fc 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go +++ b/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go @@ -285,6 +285,9 @@ func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm switch alg { case RS256, RS384, RS512: + // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the + // random parameter is legacy and ignored, and it can be nil. + // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) case PS256, PS384, PS512: out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go b/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go index 73aab0fab..0ae2e5eba 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go +++ b/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go @@ -406,6 +406,9 @@ func (ctx *genericEncrypter) Options() EncrypterOptions { // Decrypt and validate the object and return the plaintext. Note that this // function does not support multi-recipient, if you desire multi-recipient // decryption use DecryptMulti instead. +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >10x the size of the compressed data, whichever is larger. func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { headers := obj.mergedHeaders(nil) @@ -470,6 +473,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) // with support for multiple recipients. It returns the index of the recipient // for which the decryption was successful, the merged headers for that recipient, // and the plaintext. +// +// Automatically decompresses plaintext, but returns an error if the decompressed +// data would be >250kB or >3x the size of the compressed data, whichever is larger. func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { globalHeaders := obj.mergedHeaders(nil) diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go b/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go index 40b688b3d..636f6c8f5 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go +++ b/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go @@ -21,6 +21,7 @@ import ( "compress/flate" "encoding/base64" "encoding/binary" + "fmt" "io" "math/big" "strings" @@ -85,7 +86,7 @@ func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { } } -// Compress with DEFLATE +// deflate compresses the input. func deflate(input []byte) ([]byte, error) { output := new(bytes.Buffer) @@ -97,15 +98,27 @@ func deflate(input []byte) ([]byte, error) { return output.Bytes(), err } -// Decompress with DEFLATE +// inflate decompresses the input. +// +// Errors if the decompressed data would be >250kB or >10x the size of the +// compressed data, whichever is larger. 
func inflate(input []byte) ([]byte, error) { output := new(bytes.Buffer) reader := flate.NewReader(bytes.NewBuffer(input)) - _, err := io.Copy(output, reader) - if err != nil { + maxCompressedSize := 10 * int64(len(input)) + if maxCompressedSize < 250000 { + maxCompressedSize = 250000 + } + + limit := maxCompressedSize + 1 + n, err := io.CopyN(output, reader, limit) + if err != nil && err != io.EOF { return nil, err } + if n == limit { + return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize) + } err = reader.Close() return output.Bytes(), err diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go b/vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go index 2b8076f5f..52c8b62bf 100644 --- a/vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go +++ b/vendor/gopkg.in/go-jose/go-jose.v2/symmetric.go @@ -402,6 +402,11 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien if p2c <= 0 { return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: must be a positive integer") } + if p2c > 1000000 { + // An unauthenticated attacker can set a high P2C value. Set an upper limit to avoid + // DoS attacks. + return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: too high") + } // salt is UTF8(Alg) || 0x00 || Salt Input alg := headers.getAlgorithm() diff --git a/vendor/modules.txt b/vendor/modules.txt index 3c00afa5e..d45c32a2a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -18,7 +18,7 @@ github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.12.0-rc.1 +# github.com/Microsoft/hcsshim v0.12.0-rc.3 ## explicit; go 1.18 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/computestorage @@ -77,15 +77,18 @@ github.com/checkpoint-restore/go-criu/v7/stats # github.com/chzyer/readline v1.5.1 ## explicit; go 1.15 github.com/chzyer/readline -# github.com/containerd/cgroups/v3 v3.0.2 +# github.com/containerd/cgroups/v3 v3.0.3 ## explicit; go 1.18 github.com/containerd/cgroups/v3/cgroup1/stats -# github.com/containerd/containerd v1.7.9 +# github.com/containerd/containerd v1.7.13 ## explicit; go 1.19 github.com/containerd/containerd/errdefs github.com/containerd/containerd/log github.com/containerd/containerd/pkg/userns github.com/containerd/containerd/platforms +# github.com/containerd/errdefs v0.1.0 +## explicit; go 1.20 +github.com/containerd/errdefs # github.com/containerd/log v0.1.0 ## explicit; go 1.20 github.com/containerd/log @@ -108,10 +111,10 @@ github.com/containernetworking/cni/pkg/types/create github.com/containernetworking/cni/pkg/types/internal github.com/containernetworking/cni/pkg/utils github.com/containernetworking/cni/pkg/version -# github.com/containernetworking/plugins v1.3.0 +# github.com/containernetworking/plugins v1.4.0 ## explicit; go 1.20 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.33.5 +# github.com/containers/buildah v1.35.1 ## explicit; go 1.20 github.com/containers/buildah github.com/containers/buildah/bind @@ -125,6 +128,7 @@ github.com/containers/buildah/internal/config github.com/containers/buildah/internal/mkcw github.com/containers/buildah/internal/mkcw/types github.com/containers/buildah/internal/parse +github.com/containers/buildah/internal/sbom github.com/containers/buildah/internal/tmpdir github.com/containers/buildah/internal/util github.com/containers/buildah/internal/volumes @@ -137,8 +141,9 @@ 
github.com/containers/buildah/pkg/rusage github.com/containers/buildah/pkg/sshagent github.com/containers/buildah/pkg/util github.com/containers/buildah/util -# github.com/containers/common v0.57.4 -## explicit; go 1.18 +# github.com/containers/common v0.58.0 +## explicit; go 1.20 +github.com/containers/common/internal github.com/containers/common/internal/attributedstring github.com/containers/common/libimage github.com/containers/common/libimage/define @@ -147,6 +152,7 @@ github.com/containers/common/libimage/manifests github.com/containers/common/libimage/platform github.com/containers/common/libnetwork/cni github.com/containers/common/libnetwork/etchosts +github.com/containers/common/libnetwork/internal/rootlessnetns github.com/containers/common/libnetwork/internal/util github.com/containers/common/libnetwork/netavark github.com/containers/common/libnetwork/network @@ -191,7 +197,9 @@ github.com/containers/common/pkg/ssh github.com/containers/common/pkg/subscriptions github.com/containers/common/pkg/supplemented github.com/containers/common/pkg/sysinfo +github.com/containers/common/pkg/systemd github.com/containers/common/pkg/timetype +github.com/containers/common/pkg/timezone github.com/containers/common/pkg/umask github.com/containers/common/pkg/util github.com/containers/common/pkg/version @@ -199,7 +207,7 @@ github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible ## explicit github.com/containers/conmon/runner/config -# github.com/containers/image/v5 v5.29.2 +# github.com/containers/image/v5 v5.30.0 ## explicit; go 1.19 github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -269,7 +277,7 @@ github.com/containers/image/v5/version # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 ## explicit github.com/containers/libtrust -# github.com/containers/luksy v0.0.0-20231030195837-b5a7f79da98b +# github.com/containers/luksy v0.0.0-20240212203526-ceb12d4fd50c ## explicit; go 1.20 github.com/containers/luksy # github.com/containers/ocicrypt v1.1.9 @@ -288,94 +296,95 @@ github.com/containers/ocicrypt/keywrap/pkcs7 github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider -# github.com/containers/podman/v4 v4.9.3 +# github.com/containers/podman/v5 v5.0.0 +## explicit; go 1.20 +github.com/containers/podman/v5/cmd/podman/parse +github.com/containers/podman/v5/cmd/podman/registry +github.com/containers/podman/v5/libpod +github.com/containers/podman/v5/libpod/define +github.com/containers/podman/v5/libpod/driver +github.com/containers/podman/v5/libpod/events +github.com/containers/podman/v5/libpod/layers +github.com/containers/podman/v5/libpod/linkmode +github.com/containers/podman/v5/libpod/lock +github.com/containers/podman/v5/libpod/lock/file +github.com/containers/podman/v5/libpod/lock/shm +github.com/containers/podman/v5/libpod/logs +github.com/containers/podman/v5/libpod/logs/reversereader +github.com/containers/podman/v5/libpod/plugin +github.com/containers/podman/v5/libpod/shutdown +github.com/containers/podman/v5/pkg/annotations +github.com/containers/podman/v5/pkg/api/handlers +github.com/containers/podman/v5/pkg/api/handlers/types +github.com/containers/podman/v5/pkg/api/handlers/utils/apiutil +github.com/containers/podman/v5/pkg/auth +github.com/containers/podman/v5/pkg/autoupdate +github.com/containers/podman/v5/pkg/bindings +github.com/containers/podman/v5/pkg/bindings/containers +github.com/containers/podman/v5/pkg/bindings/generate 
+github.com/containers/podman/v5/pkg/bindings/images +github.com/containers/podman/v5/pkg/bindings/internal/util +github.com/containers/podman/v5/pkg/bindings/kube +github.com/containers/podman/v5/pkg/bindings/manifests +github.com/containers/podman/v5/pkg/bindings/network +github.com/containers/podman/v5/pkg/bindings/play +github.com/containers/podman/v5/pkg/bindings/pods +github.com/containers/podman/v5/pkg/bindings/secrets +github.com/containers/podman/v5/pkg/bindings/system +github.com/containers/podman/v5/pkg/bindings/volumes +github.com/containers/podman/v5/pkg/checkpoint +github.com/containers/podman/v5/pkg/checkpoint/crutils +github.com/containers/podman/v5/pkg/copy +github.com/containers/podman/v5/pkg/criu +github.com/containers/podman/v5/pkg/ctime +github.com/containers/podman/v5/pkg/domain/entities +github.com/containers/podman/v5/pkg/domain/entities/reports +github.com/containers/podman/v5/pkg/domain/entities/types +github.com/containers/podman/v5/pkg/domain/filters +github.com/containers/podman/v5/pkg/domain/infra +github.com/containers/podman/v5/pkg/domain/infra/abi +github.com/containers/podman/v5/pkg/domain/infra/abi/internal/expansion +github.com/containers/podman/v5/pkg/domain/infra/abi/parse +github.com/containers/podman/v5/pkg/domain/infra/abi/terminal +github.com/containers/podman/v5/pkg/domain/infra/tunnel +github.com/containers/podman/v5/pkg/domain/utils +github.com/containers/podman/v5/pkg/emulation +github.com/containers/podman/v5/pkg/env +github.com/containers/podman/v5/pkg/errorhandling +github.com/containers/podman/v5/pkg/inspect +github.com/containers/podman/v5/pkg/k8s.io/api/apps/v1 +github.com/containers/podman/v5/pkg/k8s.io/api/core/v1 +github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/api/resource +github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/apis/meta/v1 +github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/types +github.com/containers/podman/v5/pkg/k8s.io/apimachinery/pkg/util/intstr +github.com/containers/podman/v5/pkg/lookup +github.com/containers/podman/v5/pkg/namespaces +github.com/containers/podman/v5/pkg/parallel +github.com/containers/podman/v5/pkg/parallel/ctr +github.com/containers/podman/v5/pkg/ps +github.com/containers/podman/v5/pkg/ps/define +github.com/containers/podman/v5/pkg/rctl +github.com/containers/podman/v5/pkg/rootless +github.com/containers/podman/v5/pkg/seccomp +github.com/containers/podman/v5/pkg/selinux +github.com/containers/podman/v5/pkg/signal +github.com/containers/podman/v5/pkg/specgen +github.com/containers/podman/v5/pkg/specgen/generate +github.com/containers/podman/v5/pkg/specgen/generate/kube +github.com/containers/podman/v5/pkg/specgenutil +github.com/containers/podman/v5/pkg/specgenutilexternal +github.com/containers/podman/v5/pkg/systemd +github.com/containers/podman/v5/pkg/systemd/define +github.com/containers/podman/v5/pkg/systemd/generate +github.com/containers/podman/v5/pkg/systemd/notifyproxy +github.com/containers/podman/v5/pkg/trust +github.com/containers/podman/v5/pkg/util +github.com/containers/podman/v5/utils +github.com/containers/podman/v5/version +github.com/containers/podman/v5/version/rawversion +# github.com/containers/psgo v1.9.0 ## explicit; go 1.18 -github.com/containers/podman/v4/cmd/podman/parse -github.com/containers/podman/v4/cmd/podman/registry -github.com/containers/podman/v4/libpod -github.com/containers/podman/v4/libpod/define -github.com/containers/podman/v4/libpod/driver -github.com/containers/podman/v4/libpod/events 
-github.com/containers/podman/v4/libpod/layers -github.com/containers/podman/v4/libpod/linkmode -github.com/containers/podman/v4/libpod/lock -github.com/containers/podman/v4/libpod/lock/file -github.com/containers/podman/v4/libpod/lock/shm -github.com/containers/podman/v4/libpod/logs -github.com/containers/podman/v4/libpod/logs/reversereader -github.com/containers/podman/v4/libpod/plugin -github.com/containers/podman/v4/libpod/shutdown -github.com/containers/podman/v4/pkg/annotations -github.com/containers/podman/v4/pkg/api/handlers -github.com/containers/podman/v4/pkg/api/handlers/types -github.com/containers/podman/v4/pkg/api/handlers/utils/apiutil -github.com/containers/podman/v4/pkg/auth -github.com/containers/podman/v4/pkg/autoupdate -github.com/containers/podman/v4/pkg/bindings -github.com/containers/podman/v4/pkg/bindings/containers -github.com/containers/podman/v4/pkg/bindings/generate -github.com/containers/podman/v4/pkg/bindings/images -github.com/containers/podman/v4/pkg/bindings/internal/util -github.com/containers/podman/v4/pkg/bindings/kube -github.com/containers/podman/v4/pkg/bindings/manifests -github.com/containers/podman/v4/pkg/bindings/network -github.com/containers/podman/v4/pkg/bindings/play -github.com/containers/podman/v4/pkg/bindings/pods -github.com/containers/podman/v4/pkg/bindings/secrets -github.com/containers/podman/v4/pkg/bindings/system -github.com/containers/podman/v4/pkg/bindings/volumes -github.com/containers/podman/v4/pkg/checkpoint -github.com/containers/podman/v4/pkg/checkpoint/crutils -github.com/containers/podman/v4/pkg/copy -github.com/containers/podman/v4/pkg/criu -github.com/containers/podman/v4/pkg/ctime -github.com/containers/podman/v4/pkg/domain/entities -github.com/containers/podman/v4/pkg/domain/entities/reports -github.com/containers/podman/v4/pkg/domain/entities/types -github.com/containers/podman/v4/pkg/domain/filters -github.com/containers/podman/v4/pkg/domain/infra -github.com/containers/podman/v4/pkg/domain/infra/abi -github.com/containers/podman/v4/pkg/domain/infra/abi/parse -github.com/containers/podman/v4/pkg/domain/infra/abi/terminal -github.com/containers/podman/v4/pkg/domain/infra/tunnel -github.com/containers/podman/v4/pkg/domain/utils -github.com/containers/podman/v4/pkg/emulation -github.com/containers/podman/v4/pkg/env -github.com/containers/podman/v4/pkg/errorhandling -github.com/containers/podman/v4/pkg/inspect -github.com/containers/podman/v4/pkg/k8s.io/api/apps/v1 -github.com/containers/podman/v4/pkg/k8s.io/api/core/v1 -github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource -github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1 -github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/types -github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/util/intstr -github.com/containers/podman/v4/pkg/lookup -github.com/containers/podman/v4/pkg/namespaces -github.com/containers/podman/v4/pkg/parallel -github.com/containers/podman/v4/pkg/parallel/ctr -github.com/containers/podman/v4/pkg/ps -github.com/containers/podman/v4/pkg/ps/define -github.com/containers/podman/v4/pkg/rctl -github.com/containers/podman/v4/pkg/rootless -github.com/containers/podman/v4/pkg/seccomp -github.com/containers/podman/v4/pkg/selinux -github.com/containers/podman/v4/pkg/signal -github.com/containers/podman/v4/pkg/specgen -github.com/containers/podman/v4/pkg/specgen/generate -github.com/containers/podman/v4/pkg/specgen/generate/kube -github.com/containers/podman/v4/pkg/specgenutil 
-github.com/containers/podman/v4/pkg/specgenutilexternal
-github.com/containers/podman/v4/pkg/systemd
-github.com/containers/podman/v4/pkg/systemd/define
-github.com/containers/podman/v4/pkg/systemd/generate
-github.com/containers/podman/v4/pkg/systemd/notifyproxy
-github.com/containers/podman/v4/pkg/trust
-github.com/containers/podman/v4/pkg/util
-github.com/containers/podman/v4/utils
-github.com/containers/podman/v4/version
-github.com/containers/podman/v4/version/rawversion
-# github.com/containers/psgo v1.8.0
-## explicit; go 1.14
 github.com/containers/psgo
 github.com/containers/psgo/internal/capabilities
 github.com/containers/psgo/internal/cgroups
@@ -383,8 +392,8 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.51.0
-## explicit; go 1.19
+# github.com/containers/storage v1.53.0
+## explicit; go 1.20
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -404,12 +413,14 @@ github.com/containers/storage/pkg/chunked
 github.com/containers/storage/pkg/chunked/compressor
 github.com/containers/storage/pkg/chunked/dump
 github.com/containers/storage/pkg/chunked/internal
+github.com/containers/storage/pkg/chunked/toc
 github.com/containers/storage/pkg/config
 github.com/containers/storage/pkg/devicemapper
 github.com/containers/storage/pkg/directory
 github.com/containers/storage/pkg/dmesg
 github.com/containers/storage/pkg/fileutils
 github.com/containers/storage/pkg/fsutils
+github.com/containers/storage/pkg/fsverity
 github.com/containers/storage/pkg/homedir
 github.com/containers/storage/pkg/idmap
 github.com/containers/storage/pkg/idtools
@@ -443,13 +454,13 @@ github.com/coreos/go-systemd/v22/dbus
 github.com/coreos/go-systemd/v22/internal/dlopen
 github.com/coreos/go-systemd/v22/journal
 github.com/coreos/go-systemd/v22/sdjournal
-# github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46
+# github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f
 ## explicit
 github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer
 # github.com/cyphar/filepath-securejoin v0.2.4
 ## explicit; go 1.13
 github.com/cyphar/filepath-securejoin
-# github.com/davecgh/go-spew v1.1.1
+# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 ## explicit
 github.com/davecgh/go-spew/spew
 # github.com/disiqueira/gotree/v3 v3.0.2
@@ -460,15 +471,16 @@ github.com/disiqueira/gotree/v3
 github.com/distribution/reference
 # github.com/docker/distribution v2.8.3+incompatible
 ## explicit
-github.com/docker/distribution/reference
 github.com/docker/distribution/registry/api/errcode
 github.com/docker/distribution/registry/api/v2
 github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v24.0.7+incompatible
+# github.com/docker/docker v25.0.3+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
+github.com/docker/docker/api/types/backend
 github.com/docker/docker/api/types/blkiodev
+github.com/docker/docker/api/types/checkpoint
 github.com/docker/docker/api/types/container
 github.com/docker/docker/api/types/events
 github.com/docker/docker/api/types/filters
@@ -479,11 +491,14 @@ github.com/docker/docker/api/types/registry
 github.com/docker/docker/api/types/strslice
 github.com/docker/docker/api/types/swarm
 github.com/docker/docker/api/types/swarm/runtime
+github.com/docker/docker/api/types/system
 github.com/docker/docker/api/types/time
 github.com/docker/docker/api/types/versions
 github.com/docker/docker/api/types/volume
 github.com/docker/docker/client
 github.com/docker/docker/errdefs
+github.com/docker/docker/image/spec/specs-go/v1
+github.com/docker/docker/internal/multierror
 github.com/docker/docker/pkg/archive
 github.com/docker/docker/pkg/homedir
 github.com/docker/docker/pkg/idtools
@@ -494,15 +509,16 @@ github.com/docker/docker/pkg/meminfo
 github.com/docker/docker/pkg/namesgenerator
 github.com/docker/docker/pkg/parsers
 github.com/docker/docker/pkg/pools
-github.com/docker/docker/pkg/process
+github.com/docker/docker/pkg/progress
 github.com/docker/docker/pkg/stdcopy
+github.com/docker/docker/pkg/streamformatter
 github.com/docker/docker/pkg/system
-# github.com/docker/docker-credential-helpers v0.8.0
+# github.com/docker/docker-credential-helpers v0.8.1
 ## explicit; go 1.19
 github.com/docker/docker-credential-helpers/client
 github.com/docker/docker-credential-helpers/credentials
-# github.com/docker/go-connections v0.4.1-0.20231031175723-0b8c1f4e07a0
-## explicit; go 1.13
+# github.com/docker/go-connections v0.5.0
+## explicit; go 1.18
 github.com/docker/go-connections/nat
 github.com/docker/go-connections/sockets
 github.com/docker/go-connections/tlsconfig
@@ -513,13 +529,16 @@ github.com/docker/go-plugins-helpers/volume
 # github.com/docker/go-units v0.5.0
 ## explicit
 github.com/docker/go-units
+# github.com/felixge/httpsnoop v1.0.4
+## explicit; go 1.13
+github.com/felixge/httpsnoop
 # github.com/fsnotify/fsnotify v1.7.0
 ## explicit; go 1.17
 github.com/fsnotify/fsnotify
-# github.com/fsouza/go-dockerclient v1.10.0
+# github.com/fsouza/go-dockerclient v1.10.1
 ## explicit; go 1.20
 github.com/fsouza/go-dockerclient
-# github.com/go-jose/go-jose/v3 v3.0.1
+# github.com/go-jose/go-jose/v3 v3.0.3
 ## explicit; go 1.12
 github.com/go-jose/go-jose/v3
 github.com/go-jose/go-jose/v3/cipher
@@ -535,6 +554,9 @@ github.com/go-logfmt/logfmt
 ## explicit; go 1.18
 github.com/go-logr/logr
 github.com/go-logr/logr/funcr
+# github.com/go-logr/stdr v1.2.2
+## explicit; go 1.16
+github.com/go-logr/stdr
 # github.com/go-openapi/analysis v0.21.4
 ## explicit; go 1.13
 github.com/go-openapi/analysis
@@ -544,8 +566,8 @@ github.com/go-openapi/analysis/internal/flatten/operations
 github.com/go-openapi/analysis/internal/flatten/replace
 github.com/go-openapi/analysis/internal/flatten/schutils
 github.com/go-openapi/analysis/internal/flatten/sortref
-# github.com/go-openapi/errors v0.20.4
-## explicit; go 1.14
+# github.com/go-openapi/errors v0.21.1
+## explicit; go 1.19
 github.com/go-openapi/errors
 # github.com/go-openapi/jsonpointer v0.19.6
 ## explicit; go 1.13
@@ -563,11 +585,11 @@ github.com/go-openapi/runtime
 # github.com/go-openapi/spec v0.20.9
 ## explicit; go 1.13
 github.com/go-openapi/spec
-# github.com/go-openapi/strfmt v0.21.7
+# github.com/go-openapi/strfmt v0.22.2
 ## explicit; go 1.19
 github.com/go-openapi/strfmt
-# github.com/go-openapi/swag v0.22.4
-## explicit; go 1.18
+# github.com/go-openapi/swag v0.22.10
+## explicit; go 1.19
 github.com/go-openapi/swag
 # github.com/go-openapi/validate v0.22.1
 ## explicit; go 1.14
@@ -599,7 +621,7 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.16.1
+# github.com/google/go-containerregistry v0.19.0
 ## explicit; go 1.18
 github.com/google/go-containerregistry/pkg/name
 github.com/google/go-containerregistry/pkg/v1
@@ -617,14 +639,14 @@ github.com/google/pprof/profile
 # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 ## explicit; go 1.13
 github.com/google/shlex
-# github.com/google/uuid v1.4.0
+# github.com/google/uuid v1.6.0
 ## explicit
 github.com/google/uuid
 # github.com/gorilla/mux v1.8.1
 ## explicit; go 1.20
 github.com/gorilla/mux
-# github.com/gorilla/schema v1.2.0
-## explicit
+# github.com/gorilla/schema v1.2.1
+## explicit; go 1.20
 github.com/gorilla/schema
 # github.com/hashicorp/errwrap v1.1.0
 ## explicit
@@ -647,8 +669,8 @@ github.com/jpillora/backoff
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.3
-## explicit; go 1.19
+# github.com/klauspost/compress v1.17.7
+## explicit; go 1.20
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
@@ -663,14 +685,14 @@ github.com/klauspost/pgzip
 # github.com/kr/fs v0.1.0
 ## explicit
 github.com/kr/fs
-# github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6
-## explicit; go 1.18
+# github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e
+## explicit; go 1.20
 github.com/letsencrypt/boulder/core
-github.com/letsencrypt/boulder/errors
 github.com/letsencrypt/boulder/goodkey
 github.com/letsencrypt/boulder/identifier
 github.com/letsencrypt/boulder/probs
 github.com/letsencrypt/boulder/revocation
+github.com/letsencrypt/boulder/strictyaml
 # github.com/mailru/easyjson v0.7.7
 ## explicit; go 1.12
 github.com/mailru/easyjson/buffer
@@ -687,8 +709,8 @@ github.com/mattn/go-runewidth
 # github.com/mattn/go-shellwords v1.0.12
 ## explicit; go 1.13
 github.com/mattn/go-shellwords
-# github.com/mattn/go-sqlite3 v1.14.18
-## explicit; go 1.16
+# github.com/mattn/go-sqlite3 v1.14.22
+## explicit; go 1.19
 github.com/mattn/go-sqlite3
 # github.com/miekg/pkcs11 v1.1.1
 ## explicit; go 1.12
@@ -714,6 +736,9 @@ github.com/moby/sys/mountinfo
 # github.com/moby/sys/sequential v0.5.0
 ## explicit; go 1.17
 github.com/moby/sys/sequential
+# github.com/moby/sys/user v0.1.0
+## explicit; go 1.17
+github.com/moby/sys/user
 # github.com/moby/term v0.5.0
 ## explicit; go 1.18
 github.com/moby/term
@@ -777,12 +802,11 @@ github.com/onsi/gomega/types
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
-github.com/opencontainers/go-digest/digestset
-# github.com/opencontainers/image-spec v1.1.0-rc5
+# github.com/opencontainers/image-spec v1.1.0
 ## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.10 => github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23
+# github.com/opencontainers/runc v1.1.12 => github.com/opencontainers/runc v1.1.1-0.20230904132852-a0466dd76f23
 ## explicit; go 1.20
 github.com/opencontainers/runc/libcontainer/apparmor
 github.com/opencontainers/runc/libcontainer/cgroups
@@ -794,7 +818,7 @@ github.com/opencontainers/runc/libcontainer/devices
 github.com/opencontainers/runc/libcontainer/user
 github.com/opencontainers/runc/libcontainer/userns
 github.com/opencontainers/runc/libcontainer/utils
-# github.com/opencontainers/runtime-spec v1.1.1-0.20230922153023-c0e90434df2a
+# github.com/opencontainers/runtime-spec v1.2.0
 ## explicit
 github.com/opencontainers/runtime-spec/specs-go
 # github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc
@@ -808,7 +832,7 @@ github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
 github.com/opencontainers/selinux/pkg/pwalk
 github.com/opencontainers/selinux/pkg/pwalkdir
-# github.com/openshift/imagebuilder v1.2.6-0.20231110114814-35a50d57f722
+# github.com/openshift/imagebuilder v1.2.6
 ## explicit; go 1.19
 github.com/openshift/imagebuilder
 github.com/openshift/imagebuilder/dockerfile/command
@@ -826,7 +850,7 @@ github.com/pkg/errors
 ## explicit; go 1.15
 github.com/pkg/sftp
 github.com/pkg/sftp/internal/encoding/ssh/filexfer
-# github.com/pmezard/go-difflib v1.0.0
+# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
 # github.com/proglottis/gpgme v0.1.3
@@ -857,13 +881,13 @@ github.com/prometheus/exporter-toolkit/web
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
-# github.com/rivo/uniseg v0.4.4
+# github.com/rivo/uniseg v0.4.7
 ## explicit; go 1.18
 github.com/rivo/uniseg
 # github.com/seccomp/libseccomp-golang v0.10.0
 ## explicit; go 1.14
 github.com/seccomp/libseccomp-golang
-# github.com/secure-systems-lab/go-securesystemslib v0.7.0
+# github.com/secure-systems-lab/go-securesystemslib v0.8.0
 ## explicit; go 1.20
 github.com/secure-systems-lab/go-securesystemslib/encrypted
 # github.com/sigstore/fulcio v1.4.3
@@ -872,7 +896,7 @@ github.com/sigstore/fulcio/pkg/certificate
 # github.com/sigstore/rekor v1.2.2
 ## explicit; go 1.19
 github.com/sigstore/rekor/pkg/generated/models
-# github.com/sigstore/sigstore v1.7.5
+# github.com/sigstore/sigstore v1.8.2
 ## explicit; go 1.20
 github.com/sigstore/sigstore/pkg/cryptoutils
 github.com/sigstore/sigstore/pkg/signature
@@ -890,7 +914,7 @@ github.com/spf13/pflag
 # github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
 ## explicit
 github.com/stefanberger/go-pkcs11uri
-# github.com/sylabs/sif/v2 v2.15.0
+# github.com/sylabs/sif/v2 v2.15.1
 ## explicit; go 1.20
 github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
@@ -913,7 +937,7 @@ github.com/ulikunitz/xz/lzma
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v8 v8.6.2
+# github.com/vbauerster/mpb/v8 v8.7.2
 ## explicit; go 1.17
 github.com/vbauerster/mpb/v8
 github.com/vbauerster/mpb/v8/cwriter
@@ -926,11 +950,11 @@ github.com/vishvananda/netlink/nl
 # github.com/vishvananda/netns v0.0.4
 ## explicit; go 1.17
 github.com/vishvananda/netns
-# go.etcd.io/bbolt v1.3.8
+# go.etcd.io/bbolt v1.3.9
 ## explicit; go 1.17
 go.etcd.io/bbolt
-# go.mongodb.org/mongo-driver v1.11.3
-## explicit; go 1.13
+# go.mongodb.org/mongo-driver v1.14.0
+## explicit; go 1.18
 go.mongodb.org/mongo-driver/bson
 go.mongodb.org/mongo-driver/bson/bsoncodec
 go.mongodb.org/mongo-driver/bson/bsonoptions
@@ -948,7 +972,31 @@ go.opencensus.io/internal
 go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.18.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0
+## explicit; go 1.19
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
+# go.opentelemetry.io/otel v1.22.0
+## explicit; go 1.20
+go.opentelemetry.io/otel
+go.opentelemetry.io/otel/attribute
+go.opentelemetry.io/otel/baggage
+go.opentelemetry.io/otel/codes
+go.opentelemetry.io/otel/internal
+go.opentelemetry.io/otel/internal/attribute
+go.opentelemetry.io/otel/internal/baggage
+go.opentelemetry.io/otel/internal/global
+go.opentelemetry.io/otel/propagation
+go.opentelemetry.io/otel/semconv/v1.17.0
+# go.opentelemetry.io/otel/metric v1.22.0
+## explicit; go 1.20
+go.opentelemetry.io/otel/metric
+go.opentelemetry.io/otel/metric/embedded
+# go.opentelemetry.io/otel/trace v1.22.0
+## explicit; go 1.20
+go.opentelemetry.io/otel/trace
+go.opentelemetry.io/otel/trace/embedded
+# golang.org/x/crypto v0.21.0
 ## explicit; go 1.18
 golang.org/x/crypto/argon2
 golang.org/x/crypto/bcrypt
@@ -980,15 +1028,15 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/knownhosts
 golang.org/x/crypto/twofish
 golang.org/x/crypto/xts
-# golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+# golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
 ## explicit; go 1.20
 golang.org/x/exp/constraints
 golang.org/x/exp/maps
 golang.org/x/exp/slices
-# golang.org/x/mod v0.14.0
+# golang.org/x/mod v0.15.0
 ## explicit; go 1.18
 golang.org/x/mod/semver
-# golang.org/x/net v0.20.0
+# golang.org/x/net v0.22.0
 ## explicit; go 1.18
 golang.org/x/net/context
 golang.org/x/net/html
@@ -1003,7 +1051,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.16.0
+# golang.org/x/oauth2 v0.18.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/clientcredentials
@@ -1012,14 +1060,14 @@ golang.org/x/oauth2/internal
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.16.0
+# golang.org/x/sys v0.18.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/term v0.16.0
+# golang.org/x/term v0.18.0
 ## explicit; go 1.18
 golang.org/x/term
 # golang.org/x/text v0.14.0
@@ -1044,7 +1092,10 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.17.0
+# golang.org/x/time v0.3.0
+## explicit
+golang.org/x/time/rate
+# golang.org/x/tools v0.18.0
 ## explicit; go 1.18
 golang.org/x/tools/cmd/stringer
 golang.org/x/tools/cover
@@ -1075,10 +1126,10 @@ google.golang.org/appengine/internal/log
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.58.3
+# google.golang.org/grpc v1.61.0
 ## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -1114,6 +1165,7 @@ google.golang.org/grpc/internal/metadata
 google.golang.org/grpc/internal/pretty
 google.golang.org/grpc/internal/resolver
 google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/dns/internal
 google.golang.org/grpc/internal/resolver/passthrough
 google.golang.org/grpc/internal/resolver/unix
 google.golang.org/grpc/internal/serviceconfig
@@ -1125,6 +1177,7 @@ google.golang.org/grpc/keepalive
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
 google.golang.org/grpc/resolver
+google.golang.org/grpc/resolver/dns
 google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
@@ -1166,7 +1219,7 @@ google.golang.org/protobuf/types/gofeaturespb
 google.golang.org/protobuf/types/known/anypb
 google.golang.org/protobuf/types/known/durationpb
 google.golang.org/protobuf/types/known/timestamppb
-# gopkg.in/go-jose/go-jose.v2 v2.6.1
+# gopkg.in/go-jose/go-jose.v2 v2.6.3
 ## explicit
 gopkg.in/go-jose/go-jose.v2
 gopkg.in/go-jose/go-jose.v2/cipher
@@ -1183,9 +1236,6 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/kubernetes v1.28.4
-## explicit; go 1.20
-k8s.io/kubernetes/third_party/forked/golang/expansion
 # sigs.k8s.io/yaml v1.4.0
 ## explicit; go 1.12
 sigs.k8s.io/yaml